author		Rafael J. Wysocki <rjw@sisk.pl>	2012-09-17 14:26:02 -0400
committer	Rafael J. Wysocki <rjw@sisk.pl>	2012-09-17 14:26:02 -0400
commit		fa373abbbd01109b849fa331da73ee17748def66 (patch)
tree		3a8a53e34891cfc4315b67a843dab1e441eb918b
parent		87a2337abdd752d711724d7654a6db1b5b4c0d4d (diff)
parent		cd664cc3a574b30988476143c1dcc9298b1fa531 (diff)
Merge branch 'pm-cpufreq'
* pm-cpufreq:
  cpufreq: OMAP: remove loops_per_jiffy recalculate for smp
  sections: fix section conflicts in drivers/cpufreq
  cpufreq: conservative: update frequency when limits are relaxed
  cpufreq / ondemand: update frequency when limits are relaxed
  cpufreq: Add a generic cpufreq-cpu0 driver
  PM / OPP: Initialize OPP table from device tree
  ARM: add cpufreq transition notifier to adjust loops_per_jiffy for smp
  cpufreq: Remove support for hardware P-state chips from powernow-k8
  acpi-cpufreq: Add compatibility for legacy AMD cpb sysfs knob
  acpi-cpufreq: Add support for disabling dynamic overclocking
  ACPI: Add fixups for AMD P-state figures
  powernow-k8: delay info messages until initialization has succeeded
  cpufreq: Add warning message to powernow-k8
  acpi-cpufreq: Add quirk to disable _PSD usage on all AMD CPUs
  acpi-cpufreq: Add support for modern AMD CPUs
  cpufreq / powernow-k8: Fixup missing _PSS objects message
  PM / cpufreq: Initialise the cpu field during conservative governor start
-rw-r--r--	Documentation/ABI/testing/sysfs-devices-system-cpu	|  11
-rw-r--r--	Documentation/cpu-freq/boost.txt	|  93
-rw-r--r--	Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt	|  55
-rw-r--r--	Documentation/devicetree/bindings/power/opp.txt	|  25
-rw-r--r--	arch/arm/kernel/smp.c	|  54
-rw-r--r--	arch/x86/include/asm/msr-index.h	|   3
-rw-r--r--	drivers/acpi/processor_perflib.c	|  30
-rw-r--r--	drivers/base/power/opp.c	|  47
-rw-r--r--	drivers/cpufreq/Kconfig	|  11
-rw-r--r--	drivers/cpufreq/Kconfig.x86	|  18
-rw-r--r--	drivers/cpufreq/Makefile	|   4
-rw-r--r--	drivers/cpufreq/acpi-cpufreq.c	| 272
-rw-r--r--	drivers/cpufreq/cpufreq-cpu0.c	| 269
-rw-r--r--	drivers/cpufreq/cpufreq_conservative.c	|   2
-rw-r--r--	drivers/cpufreq/cpufreq_ondemand.c	|   1
-rw-r--r--	drivers/cpufreq/longhaul.h	|  26
-rw-r--r--	drivers/cpufreq/omap-cpufreq.c	|  35
-rw-r--r--	drivers/cpufreq/powernow-k8.c	| 406
-rw-r--r--	drivers/cpufreq/powernow-k8.h	|  32
-rw-r--r--	include/linux/opp.h	|   8
20 files changed, 947 insertions, 455 deletions
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 5dab36448b44..6943133afcb8 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -176,3 +176,14 @@ Description: Disable L3 cache indices
 		All AMD processors with L3 caches provide this functionality.
 		For details, see BKDGs at
 		http://developer.amd.com/documentation/guides/Pages/default.aspx
+
+
+What:		/sys/devices/system/cpu/cpufreq/boost
+Date:		August 2012
+Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:	Processor frequency boosting control
+
+		This switch controls the boost setting for the whole system.
+		Boosting allows the CPU and the firmware to run at a frequency
+		beyond its nominal limit.
+		More details can be found in Documentation/cpu-freq/boost.txt
diff --git a/Documentation/cpu-freq/boost.txt b/Documentation/cpu-freq/boost.txt
new file mode 100644
index 000000000000..9b4edfcf486f
--- /dev/null
+++ b/Documentation/cpu-freq/boost.txt
@@ -0,0 +1,93 @@
+Processor boosting control
+
+	- information for users -
+
+Quick guide for the impatient:
+--------------------
+/sys/devices/system/cpu/cpufreq/boost
+controls the boost setting for the whole system. You can read and write
+that file with either "0" (boosting disabled) or "1" (boosting allowed).
+Reading or writing 1 does not mean that the system is boosting at this
+very moment, but only that the CPU _may_ raise the frequency at its
+discretion.
+--------------------
+
+Introduction
+-------------
+Some CPUs support a functionality to raise the operating frequency of
+some cores in a multi-core package if certain conditions apply, mostly
+if the whole chip is not fully utilized and below its intended thermal
+budget. This is done without operating system control by a combination
+of hardware and firmware.
+On Intel CPUs this is called "Turbo Boost", AMD calls it "Turbo-Core",
+in technical documentation "Core performance boost". In Linux we use
+the term "boost" for convenience.
+
+Rationale for disable switch
+----------------------------
+
+Though the idea is to just give better performance without any user
+intervention, sometimes the need arises to disable this functionality.
+Most systems offer a switch in the (BIOS) firmware to disable the
+functionality entirely, but a more fine-grained and dynamic control would
+be desirable:
+1. While running benchmarks, reproducible results are important. Since
+   the boosting functionality depends on the load of the whole package,
+   single thread performance can vary. By explicitly disabling the boost
+   functionality at least for the benchmark's run-time the system will run
+   at a fixed frequency and results are reproducible again.
+2. To examine the impact of the boosting functionality it is helpful
+   to do tests with and without boosting.
+3. Boosting means overclocking the processor, though under controlled
+   conditions. By raising the frequency and the voltage the processor
+   will consume more power than without the boosting, which may be
+   undesirable for instance for mobile users. Disabling boosting may
+   save power here, though this depends on the workload.
+
+
+User controlled switch
+----------------------
+
+To allow the user to toggle the boosting functionality, the acpi-cpufreq
+driver exports a sysfs knob to disable it. There is a file:
+/sys/devices/system/cpu/cpufreq/boost
+which can either read "0" (boosting disabled) or "1" (boosting enabled).
+Reading the file is always supported, even if the processor does not
+support boosting. In this case the file will be read-only and always
+reads as "0". Explicitly changing the permissions and writing to that
+file anyway will return EINVAL.
+
+On supported CPUs one can write either a "0" or a "1" into this file.
+This will either disable the boost functionality on all cores in the
+whole system (0) or will allow the hardware to boost at will (1).
+
+Writing a "1" does not explicitly boost the system, but just allows the
+CPU (and the firmware) to boost at their discretion. Some implementations
+take external factors like the chip's temperature into account, so
+boosting once does not necessarily mean that it will occur every time
+even using the exact same software setup.
+
+
+AMD legacy cpb switch
+---------------------
+The AMD powernow-k8 driver used to support a very similar switch to
+disable or enable the "Core Performance Boost" feature of some AMD CPUs.
+This switch was instantiated in each CPU's cpufreq directory
+(/sys/devices/system/cpu[0-9]*/cpufreq) and was called "cpb".
+Though the per CPU existence hints at a more fine grained control, the
+actual implementation only supported system-global switch semantics,
+which was simply reflected into each CPU's file. Writing a 0 or 1 into it
+would pull the other CPUs to the same state.
+For compatibility reasons this file and its behavior are still supported
+on AMD CPUs, though it is now protected by a config switch
+(X86_ACPI_CPUFREQ_CPB). On Intel CPUs this file will never be created,
+even with the config option set.
+This functionality is considered legacy and will be removed in some future
+kernel version.
+
+More fine grained boosting control
+----------------------------------
+
+Technically it is possible to switch the boosting functionality at least
+on a per package basis, for some CPUs even per core. Currently the driver
+does not support it, but this may be implemented in the future.
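
The file protocol documented above is deliberately simple. Below is a minimal userspace sketch of toggling the switch; it assumes the acpi-cpufreq driver is loaded and exposes the file at the documented path, and that the process has root privileges to write:

	/* Sketch only: flip the global boost switch from userspace. */
	#include <stdio.h>

	int main(void)
	{
		const char *path = "/sys/devices/system/cpu/cpufreq/boost";
		FILE *f = fopen(path, "r+");
		int cur;

		if (!f) {
			perror("open boost file");
			return 1;
		}
		if (fscanf(f, "%d", &cur) != 1) {
			fclose(f);
			return 1;
		}
		rewind(f);
		/* Writing "0" disables boosting system-wide, "1" allows it;
		 * on unsupported CPUs the write fails with EINVAL. */
		fprintf(f, "%d\n", !cur);
		fclose(f);
		printf("boost: %d -> %d\n", cur, !cur);
		return 0;
	}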
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
new file mode 100644
index 000000000000..4416ccc33472
--- /dev/null
+++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
@@ -0,0 +1,55 @@
+Generic CPU0 cpufreq driver
+
+It is a generic cpufreq driver for CPU0 frequency management.  It
+supports both uniprocessor (UP) and symmetric multiprocessor (SMP)
+systems which share clock and voltage across all CPUs.
+
+Both required and optional properties listed below must be defined
+under node /cpus/cpu@0.
+
+Required properties:
+- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt
+  for details
+
+Optional properties:
+- clock-latency: Specify the possible maximum transition latency for clock,
+  in units of nanoseconds.
+- voltage-tolerance: Specify the CPU voltage tolerance in percentage.
+
+Examples:
+
+cpus {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	cpu@0 {
+		compatible = "arm,cortex-a9";
+		reg = <0>;
+		next-level-cache = <&L2>;
+		operating-points = <
+			/* kHz    uV */
+			792000  1100000
+			396000   950000
+			198000   850000
+		>;
+		clock-latency = <61036>; /* two CLK32 periods */
+	};
+
+	cpu@1 {
+		compatible = "arm,cortex-a9";
+		reg = <1>;
+		next-level-cache = <&L2>;
+	};
+
+	cpu@2 {
+		compatible = "arm,cortex-a9";
+		reg = <2>;
+		next-level-cache = <&L2>;
+	};
+
+	cpu@3 {
+		compatible = "arm,cortex-a9";
+		reg = <3>;
+		next-level-cache = <&L2>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/power/opp.txt b/Documentation/devicetree/bindings/power/opp.txt
new file mode 100644
index 000000000000..74499e5033fc
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/opp.txt
@@ -0,0 +1,25 @@
+* Generic OPP Interface
+
+SoCs have a standard set of tuples consisting of frequency and
+voltage pairs that the device will support per voltage domain. These
+are called Operating Performance Points or OPPs.
+
+Properties:
+- operating-points: An array of 2-tuple items, each consisting of a
+  frequency and a voltage, like <freq-kHz vol-uV>.
+  freq: clock frequency in kHz
+  vol: voltage in microvolt
+
+Examples:
+
+cpu@0 {
+	compatible = "arm,cortex-a9";
+	reg = <0>;
+	next-level-cache = <&L2>;
+	operating-points = <
+		/* kHz    uV */
+		792000  1100000
+		396000   950000
+		198000   850000
+	>;
+};
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index ebd8ad274d76..8e03567c9583 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -25,6 +25,7 @@
 #include <linux/percpu.h>
 #include <linux/clockchips.h>
 #include <linux/completion.h>
+#include <linux/cpufreq.h>
 
 #include <linux/atomic.h>
 #include <asm/cacheflush.h>
@@ -584,3 +585,56 @@ int setup_profiling_timer(unsigned int multiplier)
 {
 	return -EINVAL;
 }
+
+#ifdef CONFIG_CPU_FREQ
+
+static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
+static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
+static unsigned long global_l_p_j_ref;
+static unsigned long global_l_p_j_ref_freq;
+
+static int cpufreq_callback(struct notifier_block *nb,
+					unsigned long val, void *data)
+{
+	struct cpufreq_freqs *freq = data;
+	int cpu = freq->cpu;
+
+	if (freq->flags & CPUFREQ_CONST_LOOPS)
+		return NOTIFY_OK;
+
+	if (!per_cpu(l_p_j_ref, cpu)) {
+		per_cpu(l_p_j_ref, cpu) =
+			per_cpu(cpu_data, cpu).loops_per_jiffy;
+		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
+		if (!global_l_p_j_ref) {
+			global_l_p_j_ref = loops_per_jiffy;
+			global_l_p_j_ref_freq = freq->old;
+		}
+	}
+
+	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
+	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
+	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
+		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
+						global_l_p_j_ref_freq,
+						freq->new);
+		per_cpu(cpu_data, cpu).loops_per_jiffy =
+			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
+					per_cpu(l_p_j_ref_freq, cpu),
+					freq->new);
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_notifier = {
+	.notifier_call = cpufreq_callback,
+};
+
+static int __init register_cpufreq_notifier(void)
+{
+	return cpufreq_register_notifier(&cpufreq_notifier,
+						CPUFREQ_TRANSITION_NOTIFIER);
+}
+core_initcall(register_cpufreq_notifier);
+
+#endif
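
The notifier above relies on loops_per_jiffy scaling linearly with CPU frequency; the kernel's cpufreq_scale() computes new_lpj = ref_lpj * freq_new / freq_ref with overflow-safe fixed-point helpers. A standalone sketch of the same arithmetic, with illustrative values rather than the kernel helper itself:

	/* Illustration of the linear loops_per_jiffy rescaling. */
	#include <stdint.h>
	#include <stdio.h>

	static unsigned long scale_lpj(unsigned long ref_lpj,
				       unsigned int ref_khz, unsigned int new_khz)
	{
		/* 64-bit intermediate avoids overflow for large lpj values */
		return (unsigned long)(((uint64_t)ref_lpj * new_khz) / ref_khz);
	}

	int main(void)
	{
		/* Reference: 4,997,120 loops/jiffy calibrated at 792 MHz. */
		unsigned long lpj = scale_lpj(4997120, 792000, 396000);

		printf("%lu\n", lpj);	/* halves to 2498560 at 396 MHz */
		return 0;
	}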
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 957ec87385af..fbee9714d9ab 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -248,6 +248,9 @@
248 248
249#define MSR_IA32_PERF_STATUS 0x00000198 249#define MSR_IA32_PERF_STATUS 0x00000198
250#define MSR_IA32_PERF_CTL 0x00000199 250#define MSR_IA32_PERF_CTL 0x00000199
251#define MSR_AMD_PSTATE_DEF_BASE 0xc0010064
252#define MSR_AMD_PERF_STATUS 0xc0010063
253#define MSR_AMD_PERF_CTL 0xc0010062
251 254
252#define MSR_IA32_MPERF 0x000000e7 255#define MSR_IA32_MPERF 0x000000e7
253#define MSR_IA32_APERF 0x000000e8 256#define MSR_IA32_APERF 0x000000e8
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index a093dc163a42..836bfe069042 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -324,6 +324,34 @@ static int acpi_processor_get_performance_control(struct acpi_processor *pr)
 	return result;
 }
 
+#ifdef CONFIG_X86
+/*
+ * Some AMDs have 50MHz frequency multiples, but only provide 100MHz rounding
+ * in their ACPI data. Calculate the real values and fix up the _PSS data.
+ */
+static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
+{
+	u32 hi, lo, fid, did;
+	int index = px->control & 0x00000007;
+
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+		return;
+
+	if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
+	    || boot_cpu_data.x86 == 0x11) {
+		rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi);
+		fid = lo & 0x3f;
+		did = (lo >> 6) & 7;
+		if (boot_cpu_data.x86 == 0x10)
+			px->core_frequency = (100 * (fid + 0x10)) >> did;
+		else
+			px->core_frequency = (100 * (fid + 8)) >> did;
+	}
+}
+#else
+static void amd_fixup_frequency(struct acpi_processor_px *px, int i) {};
+#endif
+
 static int acpi_processor_get_performance_states(struct acpi_processor *pr)
 {
 	int result = 0;
@@ -379,6 +407,8 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
 			goto end;
 		}
 
+		amd_fixup_frequency(px, i);
+
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 				  "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
 				  i,
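
A worked example of the fid/did decode performed by amd_fixup_frequency() above, using a hypothetical MSR value rather than data read from real hardware; for family 0x10 the core frequency is 100 * (fid + 0x10) >> did MHz:

	#include <stdio.h>

	int main(void)
	{
		unsigned int lo = 0x41;			/* hypothetical MSR low word */
		unsigned int fid = lo & 0x3f;		/* 0x01 */
		unsigned int did = (lo >> 6) & 7;	/* 1 */
		unsigned int mhz = (100 * (fid + 0x10)) >> did;

		printf("%u MHz\n", mhz);	/* (100 * 17) >> 1 = 850 MHz */
		return 0;
	}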
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index ac993eafec82..d9468642fc41 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -22,6 +22,7 @@
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
 #include <linux/opp.h>
+#include <linux/of.h>
 
 /*
  * Internal data structure organization with the OPP layer library is as
@@ -674,3 +675,49 @@ struct srcu_notifier_head *opp_get_notifier(struct device *dev)
 
 	return &dev_opp->head;
 }
+
+#ifdef CONFIG_OF
+/**
+ * of_init_opp_table() - Initialize opp table from device tree
+ * @dev:	device pointer used to lookup device OPPs.
+ *
+ * Register the initial OPP table with the OPP library for given device.
+ */
+int of_init_opp_table(struct device *dev)
+{
+	const struct property *prop;
+	const __be32 *val;
+	int nr;
+
+	prop = of_find_property(dev->of_node, "operating-points", NULL);
+	if (!prop)
+		return -ENODEV;
+	if (!prop->value)
+		return -ENODATA;
+
+	/*
+	 * Each OPP is a set of tuples consisting of frequency and
+	 * voltage like <freq-kHz vol-uV>.
+	 */
+	nr = prop->length / sizeof(u32);
+	if (nr % 2) {
+		dev_err(dev, "%s: Invalid OPP list\n", __func__);
+		return -EINVAL;
+	}
+
+	val = prop->value;
+	while (nr) {
+		unsigned long freq = be32_to_cpup(val++) * 1000;
+		unsigned long volt = be32_to_cpup(val++);
+
+		if (opp_add(dev, freq, volt)) {
+			dev_warn(dev, "%s: Failed to add OPP %ld\n",
+				 __func__, freq);
+			/* keep going; nr must still be decremented below */
+		}
+		nr -= 2;
+	}
+
+	return 0;
+}
+#endif
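
A hedged sketch of how a driver might consume of_init_opp_table() (example_probe() is hypothetical; the cpufreq-cpu0 driver added further down is the real in-tree user). OPP lookups were RCU-protected in this era of the API:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/opp.h>
	#include <linux/rcupdate.h>

	static int example_probe(struct device *dev)
	{
		unsigned long freq = 396000 * 1000;	/* target rate in Hz */
		struct opp *opp;
		int ret;

		ret = of_init_opp_table(dev);	/* parse "operating-points" */
		if (ret)
			return ret;

		rcu_read_lock();		/* OPP lookups require RCU */
		opp = opp_find_freq_ceil(dev, &freq);
		if (!IS_ERR(opp))
			dev_info(dev, "%lu Hz at %lu uV\n",
				 freq, opp_get_voltage(opp));
		rcu_read_unlock();

		return 0;
	}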
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index e24a2a1b6666..ea512f47b789 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -179,6 +179,17 @@ config CPU_FREQ_GOV_CONSERVATIVE
 
 	  If in doubt, say N.
 
+config GENERIC_CPUFREQ_CPU0
+	bool "Generic CPU0 cpufreq driver"
+	depends on HAVE_CLK && REGULATOR && PM_OPP && OF
+	select CPU_FREQ_TABLE
+	help
+	  This adds a generic cpufreq driver for CPU0 frequency management.
+	  It supports both uniprocessor (UP) and symmetric multiprocessor (SMP)
+	  systems which share clock and voltage across all CPUs.
+
+	  If in doubt, say N.
+
 menu "x86 CPU frequency scaling drivers"
 depends on X86
 source "drivers/cpufreq/Kconfig.x86"
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 78ff7ee48951..934854ae5eb4 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -23,7 +23,8 @@ config X86_ACPI_CPUFREQ
 	help
 	  This driver adds a CPUFreq driver which utilizes the ACPI
 	  Processor Performance States.
-	  This driver also supports Intel Enhanced Speedstep.
+	  This driver also supports Intel Enhanced Speedstep and newer
+	  AMD CPUs.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called acpi-cpufreq.
@@ -32,6 +33,18 @@ config X86_ACPI_CPUFREQ
 
 	  If in doubt, say N.
 
+config X86_ACPI_CPUFREQ_CPB
+	default y
+	bool "Legacy cpb sysfs knob support for AMD CPUs"
+	depends on X86_ACPI_CPUFREQ && CPU_SUP_AMD
+	help
+	  The powernow-k8 driver used to provide a sysfs knob called "cpb"
+	  to disable the Core Performance Boosting feature of AMD CPUs. This
+	  file has now been superseded by the more generic "boost" entry.
+
+	  By enabling this option the acpi_cpufreq driver provides the old
+	  entry in addition to the new boost ones, for compatibility reasons.
+
 config ELAN_CPUFREQ
 	tristate "AMD Elan SC400 and SC410"
 	select CPU_FREQ_TABLE
@@ -95,7 +108,8 @@ config X86_POWERNOW_K8
 	select CPU_FREQ_TABLE
 	depends on ACPI && ACPI_PROCESSOR
 	help
-	  This adds the CPUFreq driver for K8/K10 Opteron/Athlon64 processors.
+	  This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
+	  Support for K10 and newer processors is now in acpi-cpufreq.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called powernow-k8.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 9531fc2eda22..1bc90e1306d8 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -13,13 +13,15 @@ obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
 # CPUfreq cross-arch helpers
 obj-$(CONFIG_CPU_FREQ_TABLE)		+= freq_table.o
 
+obj-$(CONFIG_GENERIC_CPUFREQ_CPU0)	+= cpufreq-cpu0.o
+
 ##################################################################################
 # x86 drivers.
 # Link order matters. K8 is preferred to ACPI because of firmware bugs in early
 # K8 systems. ACPI is preferred to all other hardware-specific drivers.
 # speedstep-* is preferred over p4-clockmod.
 
-obj-$(CONFIG_X86_POWERNOW_K8)		+= powernow-k8.o mperf.o
+obj-$(CONFIG_X86_POWERNOW_K8)		+= powernow-k8.o
 obj-$(CONFIG_X86_ACPI_CPUFREQ)		+= acpi-cpufreq.o mperf.o
 obj-$(CONFIG_X86_PCC_CPUFREQ)		+= pcc-cpufreq.o
 obj-$(CONFIG_X86_POWERNOW_K6)		+= powernow-k6.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 56c6c6b4eb4d..0d048f6a2b23 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -51,13 +51,19 @@ MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
 MODULE_DESCRIPTION("ACPI Processor P-States Driver");
 MODULE_LICENSE("GPL");
 
+#define PFX "acpi-cpufreq: "
+
 enum {
 	UNDEFINED_CAPABLE = 0,
 	SYSTEM_INTEL_MSR_CAPABLE,
+	SYSTEM_AMD_MSR_CAPABLE,
 	SYSTEM_IO_CAPABLE,
 };
 
 #define INTEL_MSR_RANGE		(0xffff)
+#define AMD_MSR_RANGE		(0x7)
+
+#define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)
 
 struct acpi_cpufreq_data {
 	struct acpi_processor_performance *acpi_data;
@@ -74,6 +80,116 @@ static struct acpi_processor_performance __percpu *acpi_perf_data;
 static struct cpufreq_driver acpi_cpufreq_driver;
 
 static unsigned int acpi_pstate_strict;
+static bool boost_enabled, boost_supported;
+static struct msr __percpu *msrs;
+
+static bool boost_state(unsigned int cpu)
+{
+	u32 lo, hi;
+	u64 msr;
+
+	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_INTEL:
+		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
+		msr = lo | ((u64)hi << 32);
+		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
+	case X86_VENDOR_AMD:
+		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
+		msr = lo | ((u64)hi << 32);
+		return !(msr & MSR_K7_HWCR_CPB_DIS);
+	}
+	return false;
+}
+
+static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
+{
+	u32 cpu;
+	u32 msr_addr;
+	u64 msr_mask;
+
+	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_INTEL:
+		msr_addr = MSR_IA32_MISC_ENABLE;
+		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
+		break;
+	case X86_VENDOR_AMD:
+		msr_addr = MSR_K7_HWCR;
+		msr_mask = MSR_K7_HWCR_CPB_DIS;
+		break;
+	default:
+		return;
+	}
+
+	rdmsr_on_cpus(cpumask, msr_addr, msrs);
+
+	for_each_cpu(cpu, cpumask) {
+		struct msr *reg = per_cpu_ptr(msrs, cpu);
+		if (enable)
+			reg->q &= ~msr_mask;
+		else
+			reg->q |= msr_mask;
+	}
+
+	wrmsr_on_cpus(cpumask, msr_addr, msrs);
+}
+
+static ssize_t _store_boost(const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val = 0;
+
+	if (!boost_supported)
+		return -EINVAL;
+
+	ret = kstrtoul(buf, 10, &val);
+	if (ret || (val > 1))
+		return -EINVAL;
+
+	if ((val && boost_enabled) || (!val && !boost_enabled))
+		return count;
+
+	get_online_cpus();
+
+	boost_set_msrs(val, cpu_online_mask);
+
+	put_online_cpus();
+
+	boost_enabled = val;
+	pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");
+
+	return count;
+}
+
+static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr,
+				  const char *buf, size_t count)
+{
+	return _store_boost(buf, count);
+}
+
+static ssize_t show_global_boost(struct kobject *kobj,
+				 struct attribute *attr, char *buf)
+{
+	return sprintf(buf, "%u\n", boost_enabled);
+}
+
+static struct global_attr global_boost = __ATTR(boost, 0644,
+						show_global_boost,
+						store_global_boost);
+
+#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
+static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
+			 size_t count)
+{
+	return _store_boost(buf, count);
+}
+
+static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
+{
+	return sprintf(buf, "%u\n", boost_enabled);
+}
+
+static struct freq_attr cpb = __ATTR(cpb, 0644, show_cpb, store_cpb);
+#endif
 
 static int check_est_cpu(unsigned int cpuid)
 {
@@ -82,6 +198,13 @@ static int check_est_cpu(unsigned int cpuid)
 	return cpu_has(cpu, X86_FEATURE_EST);
 }
 
+static int check_amd_hwpstate_cpu(unsigned int cpuid)
+{
+	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
+
+	return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
+}
+
 static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
 {
 	struct acpi_processor_performance *perf;
@@ -101,7 +224,11 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
 	int i;
 	struct acpi_processor_performance *perf;
 
-	msr &= INTEL_MSR_RANGE;
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		msr &= AMD_MSR_RANGE;
+	else
+		msr &= INTEL_MSR_RANGE;
+
 	perf = data->acpi_data;
 
 	for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
@@ -115,6 +242,7 @@ static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
 {
 	switch (data->cpu_feature) {
 	case SYSTEM_INTEL_MSR_CAPABLE:
+	case SYSTEM_AMD_MSR_CAPABLE:
 		return extract_msr(val, data);
 	case SYSTEM_IO_CAPABLE:
 		return extract_io(val, data);
@@ -150,6 +278,7 @@ static void do_drv_read(void *_cmd)
 
 	switch (cmd->type) {
 	case SYSTEM_INTEL_MSR_CAPABLE:
+	case SYSTEM_AMD_MSR_CAPABLE:
 		rdmsr(cmd->addr.msr.reg, cmd->val, h);
 		break;
 	case SYSTEM_IO_CAPABLE:
@@ -174,6 +303,9 @@ static void do_drv_write(void *_cmd)
 		lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
 		wrmsr(cmd->addr.msr.reg, lo, hi);
 		break;
+	case SYSTEM_AMD_MSR_CAPABLE:
+		wrmsr(cmd->addr.msr.reg, cmd->val, 0);
+		break;
 	case SYSTEM_IO_CAPABLE:
 		acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
 				cmd->val,
@@ -217,6 +349,10 @@ static u32 get_cur_val(const struct cpumask *mask)
 		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
 		cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
 		break;
+	case SYSTEM_AMD_MSR_CAPABLE:
+		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
+		cmd.addr.msr.reg = MSR_AMD_PERF_STATUS;
+		break;
 	case SYSTEM_IO_CAPABLE:
 		cmd.type = SYSTEM_IO_CAPABLE;
 		perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
@@ -326,6 +462,11 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
 		cmd.val = (u32) perf->states[next_perf_state].control;
 		break;
+	case SYSTEM_AMD_MSR_CAPABLE:
+		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
+		cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
+		cmd.val = (u32) perf->states[next_perf_state].control;
+		break;
 	case SYSTEM_IO_CAPABLE:
 		cmd.type = SYSTEM_IO_CAPABLE;
 		cmd.addr.io.port = perf->control_register.address;
@@ -419,6 +560,44 @@ static void free_acpi_perf_data(void)
 	free_percpu(acpi_perf_data);
 }
 
+static int boost_notify(struct notifier_block *nb, unsigned long action,
+			void *hcpu)
+{
+	unsigned cpu = (long)hcpu;
+	const struct cpumask *cpumask;
+
+	cpumask = get_cpu_mask(cpu);
+
+	/*
+	 * Clear the boost-disable bit on the CPU_DOWN path so that
+	 * this cpu cannot block the remaining ones from boosting. On
+	 * the CPU_UP path we simply keep the boost-disable flag in
+	 * sync with the current global state.
+	 */
+
+	switch (action) {
+	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
+		boost_set_msrs(boost_enabled, cpumask);
+		break;
+
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		boost_set_msrs(1, cpumask);
+		break;
+
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+
+static struct notifier_block boost_nb = {
+	.notifier_call = boost_notify,
+};
+
 /*
  * acpi_cpufreq_early_init - initialize ACPI P-States library
  *
@@ -559,6 +738,14 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
 		cpumask_copy(policy->cpus, cpu_core_mask(cpu));
 	}
+
+	if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
+		cpumask_clear(policy->cpus);
+		cpumask_set_cpu(cpu, policy->cpus);
+		cpumask_copy(policy->related_cpus, cpu_sibling_mask(cpu));
+		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
+		pr_info_once(PFX "overriding BIOS provided _PSD data\n");
+	}
 #endif
 
 	/* capability check */
@@ -580,12 +767,16 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 		break;
 	case ACPI_ADR_SPACE_FIXED_HARDWARE:
 		pr_debug("HARDWARE addr space\n");
-		if (!check_est_cpu(cpu)) {
-			result = -ENODEV;
-			goto err_unreg;
+		if (check_est_cpu(cpu)) {
+			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
+			break;
 		}
-		data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
-		break;
+		if (check_amd_hwpstate_cpu(cpu)) {
+			data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
+			break;
+		}
+		result = -ENODEV;
+		goto err_unreg;
 	default:
 		pr_debug("Unknown addr space %d\n",
 			(u32) (perf->control_register.space_id));
@@ -718,6 +909,7 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
 
 static struct freq_attr *acpi_cpufreq_attr[] = {
 	&cpufreq_freq_attr_scaling_available_freqs,
+	NULL,	/* this is a placeholder for cpb, do not remove */
 	NULL,
 };
 
@@ -733,6 +925,49 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
 	.attr = acpi_cpufreq_attr,
 };
 
+static void __init acpi_cpufreq_boost_init(void)
+{
+	if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
+		msrs = msrs_alloc();
+
+		if (!msrs)
+			return;
+
+		boost_supported = true;
+		boost_enabled = boost_state(0);
+
+		get_online_cpus();
+
+		/* Force all MSRs to the same value */
+		boost_set_msrs(boost_enabled, cpu_online_mask);
+
+		register_cpu_notifier(&boost_nb);
+
+		put_online_cpus();
+	} else
+		global_boost.attr.mode = 0444;
+
+	/* We create the boost file in any case, though for systems without
+	 * hardware support it will be read-only and hardwired to return 0.
+	 */
+	if (sysfs_create_file(cpufreq_global_kobject, &(global_boost.attr)))
+		pr_warn(PFX "could not register global boost sysfs file\n");
+	else
+		pr_debug("registered global boost sysfs file\n");
+}
+
+static void __exit acpi_cpufreq_boost_exit(void)
+{
+	sysfs_remove_file(cpufreq_global_kobject, &(global_boost.attr));
+
+	if (msrs) {
+		unregister_cpu_notifier(&boost_nb);
+
+		msrs_free(msrs);
+		msrs = NULL;
+	}
+}
+
 static int __init acpi_cpufreq_init(void)
 {
 	int ret;
@@ -746,9 +981,32 @@ static int __init acpi_cpufreq_init(void)
 	if (ret)
 		return ret;
 
+#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
+	/* this is a sysfs file with a strange name and an even stranger
+	 * semantic - per CPU instantiation, but system global effect.
+	 * Let's enable it only on AMD CPUs for compatibility reasons and
+	 * only if configured. This is considered legacy code, which
+	 * will probably be removed at some point in the future.
+	 */
+	if (check_amd_hwpstate_cpu(0)) {
+		struct freq_attr **iter;
+
+		pr_debug("adding sysfs entry for cpb\n");
+
+		for (iter = acpi_cpufreq_attr; *iter != NULL; iter++)
+			;
+
+		/* make sure there is a terminator behind it */
+		if (iter[1] == NULL)
+			*iter = &cpb;
+	}
+#endif
+
 	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
 	if (ret)
 		free_acpi_perf_data();
+	else
+		acpi_cpufreq_boost_init();
 
 	return ret;
 }
@@ -757,6 +1015,8 @@ static void __exit acpi_cpufreq_exit(void)
 {
 	pr_debug("acpi_cpufreq_exit\n");
 
+	acpi_cpufreq_boost_exit();
+
 	cpufreq_unregister_driver(&acpi_cpufreq_driver);
 
 	free_acpi_perf_data();
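
The placeholder slot in acpi_cpufreq_attr[] above is a small data-structure trick: a NULL-terminated array keeps one spare NULL entry so an optional attribute can be patched in at init time without reallocating. A generic userspace demo of the same pattern (illustrative, not kernel code):

	#include <stdio.h>

	static const char *attrs[] = {
		"scaling_available_frequencies",
		NULL,	/* placeholder for the optional "cpb" entry */
		NULL,	/* terminator */
	};

	int main(void)
	{
		const char **iter;

		for (iter = attrs; *iter != NULL; iter++)	/* find first NULL */
			;
		if (iter[1] == NULL)	/* a terminator must remain behind it */
			*iter = "cpb";

		for (iter = attrs; *iter; iter++)
			puts(*iter);
		return 0;
	}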
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
new file mode 100644
index 000000000000..e9158278c71d
--- /dev/null
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ *
+ * The OPP code in function cpu0_set_target() is reused from
+ * drivers/cpufreq/omap-cpufreq.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/opp.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+static unsigned int transition_latency;
+static unsigned int voltage_tolerance; /* in percentage */
+
+static struct device *cpu_dev;
+static struct clk *cpu_clk;
+static struct regulator *cpu_reg;
+static struct cpufreq_frequency_table *freq_table;
+
+static int cpu0_verify_speed(struct cpufreq_policy *policy)
+{
+	return cpufreq_frequency_table_verify(policy, freq_table);
+}
+
+static unsigned int cpu0_get_speed(unsigned int cpu)
+{
+	return clk_get_rate(cpu_clk) / 1000;
+}
+
+static int cpu0_set_target(struct cpufreq_policy *policy,
+			   unsigned int target_freq, unsigned int relation)
+{
+	struct cpufreq_freqs freqs;
+	struct opp *opp;
+	unsigned long freq_Hz, volt = 0, volt_old = 0, tol = 0;
+	unsigned int index, cpu;
+	int ret;
+
+	ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
+					     relation, &index);
+	if (ret) {
+		pr_err("failed to match target frequency %d: %d\n",
+		       target_freq, ret);
+		return ret;
+	}
+
+	freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
+	if (freq_Hz < 0)
+		freq_Hz = freq_table[index].frequency * 1000;
+	freqs.new = freq_Hz / 1000;
+	freqs.old = clk_get_rate(cpu_clk) / 1000;
+
+	if (freqs.old == freqs.new)
+		return 0;
+
+	for_each_online_cpu(cpu) {
+		freqs.cpu = cpu;
+		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	}
+
+	if (cpu_reg) {
+		opp = opp_find_freq_ceil(cpu_dev, &freq_Hz);
+		if (IS_ERR(opp)) {
+			pr_err("failed to find OPP for %ld\n", freq_Hz);
+			return PTR_ERR(opp);
+		}
+		volt = opp_get_voltage(opp);
+		tol = volt * voltage_tolerance / 100;
+		volt_old = regulator_get_voltage(cpu_reg);
+	}
+
+	pr_debug("%u MHz, %ld mV --> %u MHz, %ld mV\n",
+		 freqs.old / 1000, volt_old ? volt_old / 1000 : -1,
+		 freqs.new / 1000, volt ? volt / 1000 : -1);
+
+	/* scaling up?  scale voltage before frequency */
+	if (cpu_reg && freqs.new > freqs.old) {
+		ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
+		if (ret) {
+			pr_err("failed to scale voltage up: %d\n", ret);
+			freqs.new = freqs.old;
+			return ret;
+		}
+	}
+
+	ret = clk_set_rate(cpu_clk, freqs.new * 1000);
+	if (ret) {
+		pr_err("failed to set clock rate: %d\n", ret);
+		if (cpu_reg)
+			regulator_set_voltage_tol(cpu_reg, volt_old, tol);
+		return ret;
+	}
+
+	/* scaling down?  scale voltage after frequency */
+	if (cpu_reg && freqs.new < freqs.old) {
+		ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
+		if (ret) {
+			pr_err("failed to scale voltage down: %d\n", ret);
+			clk_set_rate(cpu_clk, freqs.old * 1000);
+			freqs.new = freqs.old;
+			return ret;
+		}
+	}
+
+	for_each_online_cpu(cpu) {
+		freqs.cpu = cpu;
+		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	}
+
+	return 0;
+}
+
+static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
+{
+	int ret;
+
+	if (policy->cpu != 0)
+		return -EINVAL;
+
+	ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
+	if (ret) {
+		pr_err("invalid frequency table: %d\n", ret);
+		return ret;
+	}
+
+	policy->cpuinfo.transition_latency = transition_latency;
+	policy->cur = clk_get_rate(cpu_clk) / 1000;
+
+	/*
+	 * The driver only supports the SMP configuration where all processors
+	 * share the clock and voltage.  Use cpufreq affected_cpus
+	 * interface to have all CPUs scaled together.
+	 */
+	policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
+	cpumask_setall(policy->cpus);
+
+	cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
+
+	return 0;
+}
+
+static int cpu0_cpufreq_exit(struct cpufreq_policy *policy)
+{
+	cpufreq_frequency_table_put_attr(policy->cpu);
+
+	return 0;
+}
+
+static struct freq_attr *cpu0_cpufreq_attr[] = {
+	&cpufreq_freq_attr_scaling_available_freqs,
+	NULL,
+};
+
+static struct cpufreq_driver cpu0_cpufreq_driver = {
+	.flags = CPUFREQ_STICKY,
+	.verify = cpu0_verify_speed,
+	.target = cpu0_set_target,
+	.get = cpu0_get_speed,
+	.init = cpu0_cpufreq_init,
+	.exit = cpu0_cpufreq_exit,
+	.name = "generic_cpu0",
+	.attr = cpu0_cpufreq_attr,
+};
+
+static int __devinit cpu0_cpufreq_driver_init(void)
+{
+	struct device_node *np;
+	int ret;
+
+	np = of_find_node_by_path("/cpus/cpu@0");
+	if (!np) {
+		pr_err("failed to find cpu0 node\n");
+		return -ENOENT;
+	}
+
+	cpu_dev = get_cpu_device(0);
+	if (!cpu_dev) {
+		pr_err("failed to get cpu0 device\n");
+		ret = -ENODEV;
+		goto out_put_node;
+	}
+
+	cpu_dev->of_node = np;
+
+	cpu_clk = clk_get(cpu_dev, NULL);
+	if (IS_ERR(cpu_clk)) {
+		ret = PTR_ERR(cpu_clk);
+		pr_err("failed to get cpu0 clock: %d\n", ret);
+		goto out_put_node;
+	}
+
+	cpu_reg = regulator_get(cpu_dev, "cpu0");
+	if (IS_ERR(cpu_reg)) {
+		pr_warn("failed to get cpu0 regulator\n");
+		cpu_reg = NULL;
+	}
+
+	ret = of_init_opp_table(cpu_dev);
+	if (ret) {
+		pr_err("failed to init OPP table: %d\n", ret);
+		goto out_put_node;
+	}
+
+	ret = opp_init_cpufreq_table(cpu_dev, &freq_table);
+	if (ret) {
+		pr_err("failed to init cpufreq table: %d\n", ret);
+		goto out_put_node;
+	}
+
+	of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance);
+
+	if (of_property_read_u32(np, "clock-latency", &transition_latency))
+		transition_latency = CPUFREQ_ETERNAL;
+
+	if (cpu_reg) {
+		struct opp *opp;
+		unsigned long min_uV, max_uV;
+		int i;
+
+		/*
+		 * OPP is maintained in order of increasing frequency, and
+		 * freq_table initialised from OPP is therefore sorted in the
+		 * same order.
+		 */
+		for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
+			;
+		opp = opp_find_freq_exact(cpu_dev,
+					  freq_table[0].frequency * 1000, true);
+		min_uV = opp_get_voltage(opp);
+		opp = opp_find_freq_exact(cpu_dev,
+					  freq_table[i-1].frequency * 1000, true);
+		max_uV = opp_get_voltage(opp);
+		ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
+		if (ret > 0)
+			transition_latency += ret * 1000;
+	}
+
+	ret = cpufreq_register_driver(&cpu0_cpufreq_driver);
+	if (ret) {
+		pr_err("failed register driver: %d\n", ret);
+		goto out_free_table;
+	}
+
+	of_node_put(np);
+	return 0;
+
+out_free_table:
+	opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_put_node:
+	of_node_put(np);
+	return ret;
+}
+late_initcall(cpu0_cpufreq_driver_init);
+
+MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
+MODULE_DESCRIPTION("Generic CPU0 cpufreq driver");
+MODULE_LICENSE("GPL");
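
A worked example of the transition-latency computation at the end of cpu0_cpufreq_driver_init() above: the DT "clock-latency" value (in ns) plus the worst-case regulator ramp time, which regulator_set_voltage_time() reports in microseconds (illustrative numbers only):

	#include <stdio.h>

	int main(void)
	{
		unsigned int transition_latency = 61036;	/* ns, from DT */
		int ramp_us = 120;	/* e.g. a 850mV..1100mV regulator ramp */

		if (ramp_us > 0)
			transition_latency += ramp_us * 1000;	/* us -> ns */

		printf("%u ns\n", transition_latency);	/* 181036 ns */
		return 0;
	}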
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 235a340e81f2..b75dc2c2f8d3 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -504,6 +504,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			j_dbs_info->prev_cpu_nice =
 				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 		}
+		this_dbs_info->cpu = cpu;
 		this_dbs_info->down_skip = 0;
 		this_dbs_info->requested_freq = policy->cur;
 
@@ -583,6 +584,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
 					policy->min, CPUFREQ_RELATION_L);
+		dbs_check_cpu(this_dbs_info);
 		mutex_unlock(&this_dbs_info->timer_mutex);
 
 		break;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 836e9b062e5e..9479fb33c30f 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -761,6 +761,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		else if (policy->min > this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(this_dbs_info->cur_policy,
 				policy->min, CPUFREQ_RELATION_L);
+		dbs_check_cpu(this_dbs_info);
 		mutex_unlock(&this_dbs_info->timer_mutex);
 		break;
 	}
diff --git a/drivers/cpufreq/longhaul.h b/drivers/cpufreq/longhaul.h
index cbf48fbca881..e2dc436099d1 100644
--- a/drivers/cpufreq/longhaul.h
+++ b/drivers/cpufreq/longhaul.h
@@ -56,7 +56,7 @@ union msr_longhaul {
 /*
  * VIA C3 Samuel 1 & Samuel 2 (stepping 0)
  */
-static const int __cpuinitdata samuel1_mults[16] = {
+static const int __cpuinitconst samuel1_mults[16] = {
 	-1, /* 0000 -> RESERVED */
 	30, /* 0001 -> 3.0x */
 	40, /* 0010 -> 4.0x */
@@ -75,7 +75,7 @@ static const int __cpuinitdata samuel1_mults[16] = {
 	-1, /* 1111 -> RESERVED */
 };
 
-static const int __cpuinitdata samuel1_eblcr[16] = {
+static const int __cpuinitconst samuel1_eblcr[16] = {
 	50, /* 0000 -> RESERVED */
 	30, /* 0001 -> 3.0x */
 	40, /* 0010 -> 4.0x */
@@ -97,7 +97,7 @@ static const int __cpuinitdata samuel1_eblcr[16] = {
 /*
  * VIA C3 Samuel2 Stepping 1->15
  */
-static const int __cpuinitdata samuel2_eblcr[16] = {
+static const int __cpuinitconst samuel2_eblcr[16] = {
 	50, /* 0000 -> 5.0x */
 	30, /* 0001 -> 3.0x */
 	40, /* 0010 -> 4.0x */
@@ -119,7 +119,7 @@ static const int __cpuinitdata samuel2_eblcr[16] = {
 /*
  * VIA C3 Ezra
  */
-static const int __cpuinitdata ezra_mults[16] = {
+static const int __cpuinitconst ezra_mults[16] = {
 	100, /* 0000 -> 10.0x */
 	30, /* 0001 -> 3.0x */
 	40, /* 0010 -> 4.0x */
@@ -138,7 +138,7 @@ static const int __cpuinitdata ezra_mults[16] = {
 	120, /* 1111 -> 12.0x */
 };
 
-static const int __cpuinitdata ezra_eblcr[16] = {
+static const int __cpuinitconst ezra_eblcr[16] = {
 	50, /* 0000 -> 5.0x */
 	30, /* 0001 -> 3.0x */
 	40, /* 0010 -> 4.0x */
@@ -160,7 +160,7 @@ static const int __cpuinitdata ezra_eblcr[16] = {
 /*
  * VIA C3 (Ezra-T) [C5M].
  */
-static const int __cpuinitdata ezrat_mults[32] = {
+static const int __cpuinitconst ezrat_mults[32] = {
 	100, /* 0000 -> 10.0x */
 	30, /* 0001 -> 3.0x */
 	40, /* 0010 -> 4.0x */
@@ -196,7 +196,7 @@ static const int __cpuinitdata ezrat_mults[32] = {
 	-1, /* 1111 -> RESERVED (12.0x) */
 };
 
-static const int __cpuinitdata ezrat_eblcr[32] = {
+static const int __cpuinitconst ezrat_eblcr[32] = {
 	50, /* 0000 -> 5.0x */
 	30, /* 0001 -> 3.0x */
 	40, /* 0010 -> 4.0x */
@@ -235,7 +235,7 @@ static const int __cpuinitdata ezrat_eblcr[32] = {
 /*
  * VIA C3 Nehemiah */
 
-static const int __cpuinitdata nehemiah_mults[32] = {
+static const int __cpuinitconst nehemiah_mults[32] = {
 	100, /* 0000 -> 10.0x */
 	-1, /* 0001 -> 16.0x */
 	40, /* 0010 -> 4.0x */
@@ -270,7 +270,7 @@ static const int __cpuinitdata nehemiah_mults[32] = {
 	-1, /* 1111 -> 12.0x */
 };
 
-static const int __cpuinitdata nehemiah_eblcr[32] = {
+static const int __cpuinitconst nehemiah_eblcr[32] = {
 	50, /* 0000 -> 5.0x */
 	160, /* 0001 -> 16.0x */
 	40, /* 0010 -> 4.0x */
@@ -315,7 +315,7 @@ struct mV_pos {
 	unsigned short pos;
 };
 
-static const struct mV_pos __cpuinitdata vrm85_mV[32] = {
+static const struct mV_pos __cpuinitconst vrm85_mV[32] = {
 	{1250, 8}, {1200, 6}, {1150, 4}, {1100, 2},
 	{1050, 0}, {1800, 30}, {1750, 28}, {1700, 26},
 	{1650, 24}, {1600, 22}, {1550, 20}, {1500, 18},
@@ -326,14 +326,14 @@ static const struct mV_pos __cpuinitdata vrm85_mV[32] = {
 	{1475, 17}, {1425, 15}, {1375, 13}, {1325, 11}
 };
 
-static const unsigned char __cpuinitdata mV_vrm85[32] = {
+static const unsigned char __cpuinitconst mV_vrm85[32] = {
 	0x04, 0x14, 0x03, 0x13, 0x02, 0x12, 0x01, 0x11,
 	0x00, 0x10, 0x0f, 0x1f, 0x0e, 0x1e, 0x0d, 0x1d,
 	0x0c, 0x1c, 0x0b, 0x1b, 0x0a, 0x1a, 0x09, 0x19,
 	0x08, 0x18, 0x07, 0x17, 0x06, 0x16, 0x05, 0x15
 };
 
-static const struct mV_pos __cpuinitdata mobilevrm_mV[32] = {
+static const struct mV_pos __cpuinitconst mobilevrm_mV[32] = {
 	{1750, 31}, {1700, 30}, {1650, 29}, {1600, 28},
 	{1550, 27}, {1500, 26}, {1450, 25}, {1400, 24},
 	{1350, 23}, {1300, 22}, {1250, 21}, {1200, 20},
@@ -344,7 +344,7 @@ static const struct mV_pos __cpuinitdata mobilevrm_mV[32] = {
 	{675, 3}, {650, 2}, {625, 1}, {600, 0}
 };
 
-static const unsigned char __cpuinitdata mV_mobilevrm[32] = {
+static const unsigned char __cpuinitconst mV_mobilevrm[32] = {
 	0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18,
 	0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
 	0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08,
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index b47034e650a5..6e22f4481c07 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -40,16 +40,6 @@
 /* OPP tolerance in percentage */
 #define	OPP_TOLERANCE	4
 
-#ifdef CONFIG_SMP
-struct lpj_info {
-	unsigned long	ref;
-	unsigned int	freq;
-};
-
-static DEFINE_PER_CPU(struct lpj_info, lpj_ref);
-static struct lpj_info global_lpj_ref;
-#endif
-
 static struct cpufreq_frequency_table *freq_table;
 static atomic_t freq_table_users = ATOMIC_INIT(0);
 static struct clk *mpu_clk;
@@ -161,31 +151,6 @@ static int omap_target(struct cpufreq_policy *policy,
 	}
 
 	freqs.new = omap_getspeed(policy->cpu);
-#ifdef CONFIG_SMP
-	/*
-	 * Note that loops_per_jiffy is not updated on SMP systems in
-	 * cpufreq driver. So, update the per-CPU loops_per_jiffy value
-	 * on frequency transition. We need to update all dependent CPUs.
-	 */
-	for_each_cpu(i, policy->cpus) {
-		struct lpj_info *lpj = &per_cpu(lpj_ref, i);
-		if (!lpj->freq) {
-			lpj->ref = per_cpu(cpu_data, i).loops_per_jiffy;
-			lpj->freq = freqs.old;
-		}
-
-		per_cpu(cpu_data, i).loops_per_jiffy =
-			cpufreq_scale(lpj->ref, lpj->freq, freqs.new);
-	}
-
-	/* And don't forget to adjust the global one */
-	if (!global_lpj_ref.freq) {
-		global_lpj_ref.ref = loops_per_jiffy;
-		global_lpj_ref.freq = freqs.old;
-	}
-	loops_per_jiffy = cpufreq_scale(global_lpj_ref.ref, global_lpj_ref.freq,
-			freqs.new);
-#endif
 
 done:
 	/* notifiers */
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index c0e816468e30..0b19faf002ee 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -49,22 +49,12 @@
49#define PFX "powernow-k8: " 49#define PFX "powernow-k8: "
50#define VERSION "version 2.20.00" 50#define VERSION "version 2.20.00"
51#include "powernow-k8.h" 51#include "powernow-k8.h"
52#include "mperf.h"
53 52
54/* serialize freq changes */ 53/* serialize freq changes */
55static DEFINE_MUTEX(fidvid_mutex); 54static DEFINE_MUTEX(fidvid_mutex);
56 55
57static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data); 56static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
58 57
59static int cpu_family = CPU_OPTERON;
60
61/* array to map SW pstate number to acpi state */
62static u32 ps_to_as[8];
63
64/* core performance boost */
65static bool cpb_capable, cpb_enabled;
66static struct msr __percpu *msrs;
67
68static struct cpufreq_driver cpufreq_amd64_driver; 58static struct cpufreq_driver cpufreq_amd64_driver;
69 59
70#ifndef CONFIG_SMP 60#ifndef CONFIG_SMP
@@ -86,12 +76,6 @@ static u32 find_khz_freq_from_fid(u32 fid)
86 return 1000 * find_freq_from_fid(fid); 76 return 1000 * find_freq_from_fid(fid);
87} 77}
88 78
89static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data,
90 u32 pstate)
91{
92 return data[ps_to_as[pstate]].frequency;
93}
94
95/* Return the vco fid for an input fid 79/* Return the vco fid for an input fid
96 * 80 *
97 * Each "low" fid has corresponding "high" fid, and you can get to "low" fids 81 * Each "low" fid has corresponding "high" fid, and you can get to "low" fids
@@ -114,9 +98,6 @@ static int pending_bit_stuck(void)
114{ 98{
115 u32 lo, hi; 99 u32 lo, hi;
116 100
117 if (cpu_family == CPU_HW_PSTATE)
118 return 0;
119
120 rdmsr(MSR_FIDVID_STATUS, lo, hi); 101 rdmsr(MSR_FIDVID_STATUS, lo, hi);
121 return lo & MSR_S_LO_CHANGE_PENDING ? 1 : 0; 102 return lo & MSR_S_LO_CHANGE_PENDING ? 1 : 0;
122} 103}
@@ -130,20 +111,6 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
130 u32 lo, hi; 111 u32 lo, hi;
131 u32 i = 0; 112 u32 i = 0;
132 113
133 if (cpu_family == CPU_HW_PSTATE) {
134 rdmsr(MSR_PSTATE_STATUS, lo, hi);
135 i = lo & HW_PSTATE_MASK;
136 data->currpstate = i;
137
138 /*
139 * a workaround for family 11h erratum 311 might cause
140 * an "out-of-range Pstate if the core is in Pstate-0
141 */
142 if ((boot_cpu_data.x86 == 0x11) && (i >= data->numps))
143 data->currpstate = HW_PSTATE_0;
144
145 return 0;
146 }
147 do { 114 do {
148 if (i++ > 10000) { 115 if (i++ > 10000) {
149 pr_debug("detected change pending stuck\n"); 116 pr_debug("detected change pending stuck\n");
@@ -300,14 +267,6 @@ static int decrease_vid_code_by_step(struct powernow_k8_data *data,
300 return 0; 267 return 0;
301} 268}
302 269
303/* Change hardware pstate by single MSR write */
304static int transition_pstate(struct powernow_k8_data *data, u32 pstate)
305{
306 wrmsr(MSR_PSTATE_CTRL, pstate, 0);
307 data->currpstate = pstate;
308 return 0;
309}
310
311/* Change Opteron/Athlon64 fid and vid, by the 3 phases. */ 270/* Change Opteron/Athlon64 fid and vid, by the 3 phases. */
312static int transition_fid_vid(struct powernow_k8_data *data, 271static int transition_fid_vid(struct powernow_k8_data *data,
313 u32 reqfid, u32 reqvid) 272 u32 reqfid, u32 reqvid)
@@ -524,8 +483,6 @@ static int core_voltage_post_transition(struct powernow_k8_data *data,
 static const struct x86_cpu_id powernow_k8_ids[] = {
 	/* IO based frequency switching */
 	{ X86_VENDOR_AMD, 0xf },
-	/* MSR based frequency switching supported */
-	X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, powernow_k8_ids);
@@ -561,15 +518,8 @@ static void check_supported_cpu(void *_rc)
561 "Power state transitions not supported\n"); 518 "Power state transitions not supported\n");
562 return; 519 return;
563 } 520 }
564 } else { /* must be a HW Pstate capable processor */ 521 *rc = 0;
565 cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
566 if ((edx & USE_HW_PSTATE) == USE_HW_PSTATE)
567 cpu_family = CPU_HW_PSTATE;
568 else
569 return;
570 } 522 }
571
572 *rc = 0;
573} 523}
574 524
575static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, 525static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst,
@@ -633,18 +583,11 @@ static void print_basics(struct powernow_k8_data *data)
 	for (j = 0; j < data->numps; j++) {
 		if (data->powernow_table[j].frequency !=
 				CPUFREQ_ENTRY_INVALID) {
-			if (cpu_family == CPU_HW_PSTATE) {
-				printk(KERN_INFO PFX
-					"   %d : pstate %d (%d MHz)\n", j,
-					data->powernow_table[j].index,
-					data->powernow_table[j].frequency/1000);
-			} else {
 				printk(KERN_INFO PFX
 					"fid 0x%x (%d MHz), vid 0x%x\n",
 					data->powernow_table[j].index & 0xff,
 					data->powernow_table[j].frequency/1000,
 					data->powernow_table[j].index >> 8);
-			}
 		}
 	}
 	if (data->batps)
@@ -652,20 +595,6 @@ static void print_basics(struct powernow_k8_data *data)
 			data->batps);
 }
 
-static u32 freq_from_fid_did(u32 fid, u32 did)
-{
-	u32 mhz = 0;
-
-	if (boot_cpu_data.x86 == 0x10)
-		mhz = (100 * (fid + 0x10)) >> did;
-	else if (boot_cpu_data.x86 == 0x11)
-		mhz = (100 * (fid + 8)) >> did;
-	else
-		BUG();
-
-	return mhz * 1000;
-}
-
 static int fill_powernow_table(struct powernow_k8_data *data,
 		struct pst_s *pst, u8 maxvid)
 {
@@ -825,7 +754,7 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data,
 {
 	u64 control;
 
-	if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE))
+	if (!data->acpi_data.state_count)
 		return;
 
 	control = data->acpi_data.states[index].control;
@@ -876,10 +805,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 	data->numps = data->acpi_data.state_count;
 	powernow_k8_acpi_pst_values(data, 0);
 
-	if (cpu_family == CPU_HW_PSTATE)
-		ret_val = fill_powernow_table_pstate(data, powernow_table);
-	else
-		ret_val = fill_powernow_table_fidvid(data, powernow_table);
+	ret_val = fill_powernow_table_fidvid(data, powernow_table);
 	if (ret_val)
 		goto err_out_mem;
 
@@ -916,51 +842,6 @@ err_out:
 	return ret_val;
 }
 
-static int fill_powernow_table_pstate(struct powernow_k8_data *data,
-		struct cpufreq_frequency_table *powernow_table)
-{
-	int i;
-	u32 hi = 0, lo = 0;
-	rdmsr(MSR_PSTATE_CUR_LIMIT, lo, hi);
-	data->max_hw_pstate = (lo & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;
-
-	for (i = 0; i < data->acpi_data.state_count; i++) {
-		u32 index;
-
-		index = data->acpi_data.states[i].control & HW_PSTATE_MASK;
-		if (index > data->max_hw_pstate) {
-			printk(KERN_ERR PFX "invalid pstate %d - "
-					"bad value %d.\n", i, index);
-			printk(KERN_ERR PFX "Please report to BIOS "
-					"manufacturer\n");
-			invalidate_entry(powernow_table, i);
-			continue;
-		}
-
-		ps_to_as[index] = i;
-
-		/* Frequency may be rounded for these */
-		if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
-			|| boot_cpu_data.x86 == 0x11) {
-
-			rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
-			if (!(hi & HW_PSTATE_VALID_MASK)) {
-				pr_debug("invalid pstate %d, ignoring\n", index);
-				invalidate_entry(powernow_table, i);
-				continue;
-			}
-
-			powernow_table[i].frequency =
-				freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
-		} else
-			powernow_table[i].frequency =
-				data->acpi_data.states[i].core_frequency * 1000;
-
-		powernow_table[i].index = index;
-	}
-	return 0;
-}
-
 static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
 		struct cpufreq_frequency_table *powernow_table)
 {
@@ -1037,15 +918,7 @@ static int get_transition_latency(struct powernow_k8_data *data)
 			max_latency = cur_latency;
 	}
 	if (max_latency == 0) {
-		/*
-		 * Fam 11h and later may return 0 as transition latency. This
-		 * is intended and means "very fast". While cpufreq core and
-		 * governors currently can handle that gracefully, better set it
-		 * to 1 to avoid problems in the future.
-		 */
-		if (boot_cpu_data.x86 < 0x11)
-			printk(KERN_ERR FW_WARN PFX "Invalid zero transition "
-				"latency\n");
+		pr_err(FW_WARN PFX "Invalid zero transition latency\n");
 		max_latency = 1;
 	}
 	/* value in usecs, needs to be in nanoseconds */
@@ -1105,40 +978,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
 	return res;
 }
 
-/* Take a frequency, and issue the hardware pstate transition command */
-static int transition_frequency_pstate(struct powernow_k8_data *data,
-		unsigned int index)
-{
-	u32 pstate = 0;
-	int res, i;
-	struct cpufreq_freqs freqs;
-
-	pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index);
-
-	/* get MSR index for hardware pstate transition */
-	pstate = index & HW_PSTATE_MASK;
-	if (pstate > data->max_hw_pstate)
-		return -EINVAL;
-
-	freqs.old = find_khz_freq_from_pstate(data->powernow_table,
-			data->currpstate);
-	freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
-
-	for_each_cpu(i, data->available_cores) {
-		freqs.cpu = i;
-		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-	}
-
-	res = transition_pstate(data, pstate);
-	freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
-
-	for_each_cpu(i, data->available_cores) {
-		freqs.cpu = i;
-		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-	}
-	return res;
-}
-
 /* Driver entry point to switch to the target frequency */
 static int powernowk8_target(struct cpufreq_policy *pol,
 		unsigned targfreq, unsigned relation)
@@ -1180,18 +1019,15 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 	if (query_current_values_with_pending_wait(data))
 		goto err_out;
 
-	if (cpu_family != CPU_HW_PSTATE) {
-		pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
-		data->currfid, data->currvid);
+	pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
+		data->currfid, data->currvid);
 
-		if ((checkvid != data->currvid) ||
-		    (checkfid != data->currfid)) {
-			printk(KERN_INFO PFX
-				"error - out of sync, fix 0x%x 0x%x, "
-				"vid 0x%x 0x%x\n",
-				checkfid, data->currfid,
-				checkvid, data->currvid);
-		}
+	if ((checkvid != data->currvid) ||
+	    (checkfid != data->currfid)) {
+		pr_info(PFX
+			"error - out of sync, fix 0x%x 0x%x, vid 0x%x 0x%x\n",
+			checkfid, data->currfid,
+			checkvid, data->currvid);
 	}
 
 	if (cpufreq_frequency_table_target(pol, data->powernow_table,
@@ -1202,11 +1038,8 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 
 	powernow_k8_acpi_pst_values(data, newstate);
 
-	if (cpu_family == CPU_HW_PSTATE)
-		ret = transition_frequency_pstate(data,
-				data->powernow_table[newstate].index);
-	else
-		ret = transition_frequency_fidvid(data, newstate);
+	ret = transition_frequency_fidvid(data, newstate);
+
 	if (ret) {
 		printk(KERN_ERR PFX "transition frequency failed\n");
 		ret = 1;
@@ -1215,11 +1048,7 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 	}
 	mutex_unlock(&fidvid_mutex);
 
-	if (cpu_family == CPU_HW_PSTATE)
-		pol->cur = find_khz_freq_from_pstate(data->powernow_table,
-				data->powernow_table[newstate].index);
-	else
-		pol->cur = find_khz_freq_from_fid(data->currfid);
+	pol->cur = find_khz_freq_from_fid(data->currfid);
 	ret = 0;
 
 err_out:
@@ -1259,22 +1088,23 @@ static void __cpuinit powernowk8_cpu_init_on_cpu(void *_init_on_cpu)
 		return;
 	}
 
-	if (cpu_family == CPU_OPTERON)
-		fidvid_msr_init();
+	fidvid_msr_init();
 
 	init_on_cpu->rc = 0;
 }
 
+static const char missing_pss_msg[] =
+	KERN_ERR
+	FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
+	FW_BUG PFX "First, make sure Cool'N'Quiet is enabled in the BIOS.\n"
+	FW_BUG PFX "If that doesn't help, try upgrading your BIOS.\n";
+
 /* per CPU init entry point to the driver */
 static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
-	static const char ACPI_PSS_BIOS_BUG_MSG[] =
-		KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
-		FW_BUG PFX "Try again with latest BIOS.\n";
 	struct powernow_k8_data *data;
 	struct init_on_cpu init_on_cpu;
 	int rc;
-	struct cpuinfo_x86 *c = &cpu_data(pol->cpu);
 
 	if (!cpu_online(pol->cpu))
 		return -ENODEV;
@@ -1290,7 +1120,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	}
 
 	data->cpu = pol->cpu;
-	data->currpstate = HW_PSTATE_INVALID;
 
 	if (powernow_k8_cpu_init_acpi(data)) {
 		/*
@@ -1298,7 +1127,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 		 * an UP version, and is deprecated by AMD.
 		 */
 		if (num_online_cpus() != 1) {
-			printk_once(ACPI_PSS_BIOS_BUG_MSG);
+			printk_once(missing_pss_msg);
 			goto err_out;
 		}
 		if (pol->cpu != 0) {
@@ -1327,17 +1156,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	if (rc != 0)
 		goto err_out_exit_acpi;
 
-	if (cpu_family == CPU_HW_PSTATE)
-		cpumask_copy(pol->cpus, cpumask_of(pol->cpu));
-	else
-		cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
+	cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
 	data->available_cores = pol->cpus;
 
-	if (cpu_family == CPU_HW_PSTATE)
-		pol->cur = find_khz_freq_from_pstate(data->powernow_table,
-				data->currpstate);
-	else
-		pol->cur = find_khz_freq_from_fid(data->currfid);
+	pol->cur = find_khz_freq_from_fid(data->currfid);
 	pr_debug("policy current frequency %d kHz\n", pol->cur);
 
 	/* min/max the cpu is capable of */
@@ -1349,18 +1171,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 		return -EINVAL;
 	}
 
-	/* Check for APERF/MPERF support in hardware */
-	if (cpu_has(c, X86_FEATURE_APERFMPERF))
-		cpufreq_amd64_driver.getavg = cpufreq_get_measured_perf;
-
 	cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
 
-	if (cpu_family == CPU_HW_PSTATE)
-		pr_debug("cpu_init done, current pstate 0x%x\n",
-				data->currpstate);
-	else
-		pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
-				data->currfid, data->currvid);
+	pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
+		data->currfid, data->currvid);
 
 	per_cpu(powernow_data, pol->cpu) = data;
 
@@ -1413,88 +1227,15 @@ static unsigned int powernowk8_get(unsigned int cpu)
 	if (err)
 		goto out;
 
-	if (cpu_family == CPU_HW_PSTATE)
-		khz = find_khz_freq_from_pstate(data->powernow_table,
-				data->currpstate);
-	else
-		khz = find_khz_freq_from_fid(data->currfid);
+	khz = find_khz_freq_from_fid(data->currfid);
 
 
 out:
 	return khz;
 }
 
-static void _cpb_toggle_msrs(bool t)
-{
-	int cpu;
-
-	get_online_cpus();
-
-	rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
-
-	for_each_cpu(cpu, cpu_online_mask) {
-		struct msr *reg = per_cpu_ptr(msrs, cpu);
-		if (t)
-			reg->l &= ~BIT(25);
-		else
-			reg->l |= BIT(25);
-	}
-	wrmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
-
-	put_online_cpus();
-}
-
-/*
- * Switch on/off core performance boosting.
- *
- * 0=disable
- * 1=enable.
- */
-static void cpb_toggle(bool t)
-{
-	if (!cpb_capable)
-		return;
-
-	if (t && !cpb_enabled) {
-		cpb_enabled = true;
-		_cpb_toggle_msrs(t);
-		printk(KERN_INFO PFX "Core Boosting enabled.\n");
-	} else if (!t && cpb_enabled) {
-		cpb_enabled = false;
-		_cpb_toggle_msrs(t);
-		printk(KERN_INFO PFX "Core Boosting disabled.\n");
-	}
-}
-
-static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
-		size_t count)
-{
-	int ret = -EINVAL;
-	unsigned long val = 0;
-
-	ret = strict_strtoul(buf, 10, &val);
-	if (!ret && (val == 0 || val == 1) && cpb_capable)
-		cpb_toggle(val);
-	else
-		return -EINVAL;
-
-	return count;
-}
-
-static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
-{
-	return sprintf(buf, "%u\n", cpb_enabled);
-}
-
-#define define_one_rw(_name) \
-static struct freq_attr _name = \
-__ATTR(_name, 0644, show_##_name, store_##_name)
-
-define_one_rw(cpb);
-
 static struct freq_attr *powernow_k8_attr[] = {
 	&cpufreq_freq_attr_scaling_available_freqs,
-	&cpb,
 	NULL,
 };
 
@@ -1510,53 +1251,18 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
 	.attr = powernow_k8_attr,
 };
 
-/*
- * Clear the boost-disable flag on the CPU_DOWN path so that this cpu
- * cannot block the remaining ones from boosting. On the CPU_UP path we
- * simply keep the boost-disable flag in sync with the current global
- * state.
- */
-static int cpb_notify(struct notifier_block *nb, unsigned long action,
-		void *hcpu)
-{
-	unsigned cpu = (long)hcpu;
-	u32 lo, hi;
-
-	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-
-		if (!cpb_enabled) {
-			rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
-			lo |= BIT(25);
-			wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
-		}
-		break;
-
-	case CPU_DOWN_PREPARE:
-	case CPU_DOWN_PREPARE_FROZEN:
-		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
-		lo &= ~BIT(25);
-		wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
-		break;
-
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block cpb_nb = {
-	.notifier_call = cpb_notify,
-};
-
 /* driver entry point for init */
 static int __cpuinit powernowk8_init(void)
 {
-	unsigned int i, supported_cpus = 0, cpu;
+	unsigned int i, supported_cpus = 0;
 	int rv;
 
+	if (static_cpu_has(X86_FEATURE_HW_PSTATE)) {
+		pr_warn(PFX "this CPU is not supported anymore, using acpi-cpufreq instead.\n");
+		request_module("acpi-cpufreq");
+		return -ENODEV;
+	}
+
 	if (!x86_match_cpu(powernow_k8_ids))
 		return -ENODEV;
 
@@ -1570,38 +1276,13 @@ static int __cpuinit powernowk8_init(void)
 	if (supported_cpus != num_online_cpus())
 		return -ENODEV;
 
-	printk(KERN_INFO PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
-		num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus);
-
-	if (boot_cpu_has(X86_FEATURE_CPB)) {
-
-		cpb_capable = true;
-
-		msrs = msrs_alloc();
-		if (!msrs) {
-			printk(KERN_ERR "%s: Error allocating msrs!\n", __func__);
-			return -ENOMEM;
-		}
-
-		register_cpu_notifier(&cpb_nb);
-
-		rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
-
-		for_each_cpu(cpu, cpu_online_mask) {
-			struct msr *reg = per_cpu_ptr(msrs, cpu);
-			cpb_enabled |= !(!!(reg->l & BIT(25)));
-		}
-
-		printk(KERN_INFO PFX "Core Performance Boosting: %s.\n",
-			(cpb_enabled ? "on" : "off"));
-	}
-
-	rv = cpufreq_register_driver(&cpufreq_amd64_driver);
-	if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) {
-		unregister_cpu_notifier(&cpb_nb);
-		msrs_free(msrs);
-		msrs = NULL;
-	}
+	rv = cpufreq_register_driver(&cpufreq_amd64_driver);
+
+	if (!rv)
+		pr_info(PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
+			num_online_nodes(), boot_cpu_data.x86_model_id,
+			supported_cpus);
+
 	return rv;
 }
 
@@ -1610,13 +1291,6 @@ static void __exit powernowk8_exit(void)
 {
 	pr_debug("exit\n");
 
-	if (boot_cpu_has(X86_FEATURE_CPB)) {
-		msrs_free(msrs);
-		msrs = NULL;
-
-		unregister_cpu_notifier(&cpb_nb);
-	}
-
 	cpufreq_unregister_driver(&cpufreq_amd64_driver);
 }
 
diff --git a/drivers/cpufreq/powernow-k8.h b/drivers/cpufreq/powernow-k8.h
index 3744d26cdc2b..79329d4d5abe 100644
--- a/drivers/cpufreq/powernow-k8.h
+++ b/drivers/cpufreq/powernow-k8.h
@@ -5,24 +5,11 @@
  * http://www.gnu.org/licenses/gpl.html
  */
 
-enum pstate {
-	HW_PSTATE_INVALID = 0xff,
-	HW_PSTATE_0 = 0,
-	HW_PSTATE_1 = 1,
-	HW_PSTATE_2 = 2,
-	HW_PSTATE_3 = 3,
-	HW_PSTATE_4 = 4,
-	HW_PSTATE_5 = 5,
-	HW_PSTATE_6 = 6,
-	HW_PSTATE_7 = 7,
-};
-
 struct powernow_k8_data {
 	unsigned int cpu;
 
 	u32 numps; /* number of p-states */
 	u32 batps; /* number of p-states supported on battery */
-	u32 max_hw_pstate; /* maximum legal hardware pstate */
 
 	/* these values are constant when the PSB is used to determine
 	 * vid/fid pairings, but are modified during the ->target() call
@@ -37,7 +24,6 @@ struct powernow_k8_data {
 	/* keep track of the current fid / vid or pstate */
 	u32 currvid;
 	u32 currfid;
-	enum pstate currpstate;
 
 	/* the powernow_table includes all frequency and vid/fid pairings:
 	 * fid are the lower 8 bits of the index, vid are the upper 8 bits.
@@ -97,23 +83,6 @@ struct powernow_k8_data {
 #define MSR_S_HI_CURRENT_VID      0x0000003f
 #define MSR_C_HI_STP_GNT_BENIGN   0x00000001
 
-
-/* Hardware Pstate _PSS and MSR definitions */
-#define USE_HW_PSTATE		0x00000080
-#define HW_PSTATE_MASK		0x00000007
-#define HW_PSTATE_VALID_MASK	0x80000000
-#define HW_PSTATE_MAX_MASK	0x000000f0
-#define HW_PSTATE_MAX_SHIFT	4
-#define MSR_PSTATE_DEF_BASE	0xc0010064 /* base of Pstate MSRs */
-#define MSR_PSTATE_STATUS	0xc0010063 /* Pstate Status MSR */
-#define MSR_PSTATE_CTRL		0xc0010062 /* Pstate control MSR */
-#define MSR_PSTATE_CUR_LIMIT	0xc0010061 /* pstate current limit MSR */
-
-/* define the two driver architectures */
-#define CPU_OPTERON 0
-#define CPU_HW_PSTATE 1
-
-
 /*
  * There are restrictions frequencies have to follow:
  * - only 1 entry in the low fid table ( <=1.4GHz )
@@ -218,5 +187,4 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid);
 
 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index);
 
-static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
 static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
diff --git a/include/linux/opp.h b/include/linux/opp.h
index 2a4e5faee904..214e0ebcb84d 100644
--- a/include/linux/opp.h
+++ b/include/linux/opp.h
@@ -48,6 +48,14 @@ int opp_disable(struct device *dev, unsigned long freq);
 
 struct srcu_notifier_head *opp_get_notifier(struct device *dev);
 
+#ifdef CONFIG_OF
+int of_init_opp_table(struct device *dev);
+#else
+static inline int of_init_opp_table(struct device *dev)
+{
+	return -EINVAL;
+}
+#endif /* CONFIG_OF */
 #else
 static inline unsigned long opp_get_voltage(struct opp *opp)
 {