author     Thomas Gleixner <tglx@linutronix.de>    2007-10-11 05:16:27 -0400
committer  Thomas Gleixner <tglx@linutronix.de>    2007-10-11 05:16:27 -0400
commit     ee580dc91efd83e6b55955e7261e8ad2a0e08d1a (patch)
tree       a6f0884e77913df35ae4219fa66fa0c95359c5cf /arch/x86
parent     c18db0d7e299791c73d4dbe5ae7905b2ab8ba332 (diff)

i386: move kernel/cpu/cpufreq

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Diffstat (limited to 'arch/x86')
22 files changed, 9442 insertions, 0 deletions
diff --git a/arch/x86/kernel/cpu/cpufreq/Kconfig b/arch/x86/kernel/cpu/cpufreq/Kconfig
new file mode 100644
index 000000000000..d8c6f132dc7a
--- /dev/null
+++ b/arch/x86/kernel/cpu/cpufreq/Kconfig
@@ -0,0 +1,250 @@
1 | # | ||
2 | # CPU Frequency scaling | ||
3 | # | ||
4 | |||
5 | menu "CPU Frequency scaling" | ||
6 | |||
7 | source "drivers/cpufreq/Kconfig" | ||
8 | |||
9 | if CPU_FREQ | ||
10 | |||
11 | comment "CPUFreq processor drivers" | ||
12 | |||
13 | config X86_ACPI_CPUFREQ | ||
14 | tristate "ACPI Processor P-States driver" | ||
15 | select CPU_FREQ_TABLE | ||
16 | depends on ACPI_PROCESSOR | ||
17 | help | ||
18 | This driver adds a CPUFreq driver which utilizes the ACPI | ||
19 | Processor Performance States. | ||
20 | This driver also supports Intel Enhanced Speedstep. | ||
21 | |||
22 | For details, take a look at <file:Documentation/cpu-freq/>. | ||
23 | |||
24 | If in doubt, say N. | ||
25 | |||
26 | config ELAN_CPUFREQ | ||
27 | tristate "AMD Elan SC400 and SC410" | ||
28 | select CPU_FREQ_TABLE | ||
29 | depends on X86_ELAN | ||
30 | ---help--- | ||
31 | This adds the CPUFreq driver for AMD Elan SC400 and SC410 | ||
32 | processors. | ||
33 | |||
34 | You need to specify the processor maximum speed as boot | ||
35 | parameter: elanfreq=maxspeed (in kHz) or as module | ||
36 | parameter "max_freq". | ||
37 | |||
38 | For details, take a look at <file:Documentation/cpu-freq/>. | ||
39 | |||
40 | If in doubt, say N. | ||
41 | |||
42 | config SC520_CPUFREQ | ||
43 | tristate "AMD Elan SC520" | ||
44 | select CPU_FREQ_TABLE | ||
45 | depends on X86_ELAN | ||
46 | ---help--- | ||
47 | This adds the CPUFreq driver for AMD Elan SC520 processor. | ||
48 | |||
49 | For details, take a look at <file:Documentation/cpu-freq/>. | ||
50 | |||
51 | If in doubt, say N. | ||
52 | |||
53 | |||
54 | config X86_POWERNOW_K6 | ||
55 | tristate "AMD Mobile K6-2/K6-3 PowerNow!" | ||
56 | select CPU_FREQ_TABLE | ||
57 | help | ||
58 | This adds the CPUFreq driver for mobile AMD K6-2+ and mobile | ||
59 | AMD K6-3+ processors. | ||
60 | |||
61 | For details, take a look at <file:Documentation/cpu-freq/>. | ||
62 | |||
63 | If in doubt, say N. | ||
64 | |||
65 | config X86_POWERNOW_K7 | ||
66 | tristate "AMD Mobile Athlon/Duron PowerNow!" | ||
67 | select CPU_FREQ_TABLE | ||
68 | help | ||
69 | This adds the CPUFreq driver for mobile AMD K7 processors. | ||
70 | |||
71 | For details, take a look at <file:Documentation/cpu-freq/>. | ||
72 | |||
73 | If in doubt, say N. | ||
74 | |||
75 | config X86_POWERNOW_K7_ACPI | ||
76 | bool | ||
77 | depends on X86_POWERNOW_K7 && ACPI_PROCESSOR | ||
78 | depends on !(X86_POWERNOW_K7 = y && ACPI_PROCESSOR = m) | ||
79 | default y | ||
80 | |||
81 | config X86_POWERNOW_K8 | ||
82 | tristate "AMD Opteron/Athlon64 PowerNow!" | ||
83 | select CPU_FREQ_TABLE | ||
84 | depends on EXPERIMENTAL | ||
85 | help | ||
86 | This adds the CPUFreq driver for mobile AMD Opteron/Athlon64 processors. | ||
87 | |||
88 | For details, take a look at <file:Documentation/cpu-freq/>. | ||
89 | |||
90 | If in doubt, say N. | ||
91 | |||
92 | config X86_POWERNOW_K8_ACPI | ||
93 | bool "ACPI Support" | ||
94 | select ACPI_PROCESSOR | ||
95 | depends on ACPI && X86_POWERNOW_K8 | ||
96 | default y | ||
97 | help | ||
98 | This provides access to the K8's Processor Performance States via ACPI. | ||
99 | This driver is probably required for CPUFreq to work with multi-socket and | ||
100 | SMP systems. It is not required on at least some single-socket yet | ||
101 | multi-core systems, even if SMP is enabled. | ||
102 | |||
103 | It is safe to say Y here. | ||
104 | |||
105 | config X86_GX_SUSPMOD | ||
106 | tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation" | ||
107 | depends on PCI | ||
108 | help | ||
109 | This adds the CPUFreq driver for NatSemi Geode processors which | ||
110 | support suspend modulation. | ||
111 | |||
112 | For details, take a look at <file:Documentation/cpu-freq/>. | ||
113 | |||
114 | If in doubt, say N. | ||
115 | |||
116 | config X86_SPEEDSTEP_CENTRINO | ||
117 | tristate "Intel Enhanced SpeedStep" | ||
118 | select CPU_FREQ_TABLE | ||
119 | select X86_SPEEDSTEP_CENTRINO_TABLE | ||
120 | help | ||
121 | This adds the CPUFreq driver for Enhanced SpeedStep enabled | ||
122 | mobile CPUs. This means Intel Pentium M (Centrino) CPUs. However, | ||
123 | you also need to say Y to "Use ACPI tables to decode..." below | ||
124 | [which might imply enabling ACPI] if you want to use this driver | ||
125 | on non-Banias CPUs. | ||
126 | |||
127 | For details, take a look at <file:Documentation/cpu-freq/>. | ||
128 | |||
129 | If in doubt, say N. | ||
130 | |||
131 | config X86_SPEEDSTEP_CENTRINO_TABLE | ||
132 | bool "Built-in tables for Banias CPUs" | ||
133 | depends on X86_SPEEDSTEP_CENTRINO | ||
134 | default y | ||
135 | help | ||
136 | Use built-in tables for Banias CPUs if ACPI encoding | ||
137 | is not available. | ||
138 | |||
139 | If in doubt, say N. | ||
140 | |||
141 | config X86_SPEEDSTEP_ICH | ||
142 | tristate "Intel Speedstep on ICH-M chipsets (ioport interface)" | ||
143 | select CPU_FREQ_TABLE | ||
144 | help | ||
145 | This adds the CPUFreq driver for certain mobile Intel Pentium III | ||
146 | (Coppermine), all mobile Intel Pentium III-M (Tualatin) and all | ||
147 | mobile Intel Pentium 4 P4-M on systems which have an Intel ICH2, | ||
148 | ICH3 or ICH4 southbridge. | ||
149 | |||
150 | For details, take a look at <file:Documentation/cpu-freq/>. | ||
151 | |||
152 | If in doubt, say N. | ||
153 | |||
154 | config X86_SPEEDSTEP_SMI | ||
155 | tristate "Intel SpeedStep on 440BX/ZX/MX chipsets (SMI interface)" | ||
156 | select CPU_FREQ_TABLE | ||
157 | depends on EXPERIMENTAL | ||
158 | help | ||
159 | This adds the CPUFreq driver for certain mobile Intel Pentium III | ||
160 | (Coppermine), all mobile Intel Pentium III-M (Tualatin) | ||
161 | on systems which have an Intel 440BX/ZX/MX southbridge. | ||
162 | |||
163 | For details, take a look at <file:Documentation/cpu-freq/>. | ||
164 | |||
165 | If in doubt, say N. | ||
166 | |||
167 | config X86_P4_CLOCKMOD | ||
168 | tristate "Intel Pentium 4 clock modulation" | ||
169 | select CPU_FREQ_TABLE | ||
170 | help | ||
171 | This adds the CPUFreq driver for Intel Pentium 4 / XEON | ||
172 | processors. | ||
173 | |||
174 | For details, take a look at <file:Documentation/cpu-freq/>. | ||
175 | |||
176 | If in doubt, say N. | ||
177 | |||
178 | config X86_CPUFREQ_NFORCE2 | ||
179 | tristate "nVidia nForce2 FSB changing" | ||
180 | depends on EXPERIMENTAL | ||
181 | help | ||
182 | This adds the CPUFreq driver for FSB changing on nVidia nForce2 | ||
183 | platforms. | ||
184 | |||
185 | For details, take a look at <file:Documentation/cpu-freq/>. | ||
186 | |||
187 | If in doubt, say N. | ||
188 | |||
189 | config X86_LONGRUN | ||
190 | tristate "Transmeta LongRun" | ||
191 | help | ||
192 | This adds the CPUFreq driver for Transmeta Crusoe and Efficeon processors | ||
193 | which support LongRun. | ||
194 | |||
195 | For details, take a look at <file:Documentation/cpu-freq/>. | ||
196 | |||
197 | If in doubt, say N. | ||
198 | |||
199 | config X86_LONGHAUL | ||
200 | tristate "VIA Cyrix III Longhaul" | ||
201 | select CPU_FREQ_TABLE | ||
202 | depends on ACPI_PROCESSOR | ||
203 | help | ||
204 | This adds the CPUFreq driver for VIA Samuel/CyrixIII, | ||
205 | VIA Cyrix Samuel/C3, VIA Cyrix Ezra and VIA Cyrix Ezra-T | ||
206 | processors. | ||
207 | |||
208 | For details, take a look at <file:Documentation/cpu-freq/>. | ||
209 | |||
210 | If in doubt, say N. | ||
211 | |||
212 | config X86_E_POWERSAVER | ||
213 | tristate "VIA C7 Enhanced PowerSaver (EXPERIMENTAL)" | ||
214 | select CPU_FREQ_TABLE | ||
215 | depends on EXPERIMENTAL | ||
216 | help | ||
217 | This adds the CPUFreq driver for VIA C7 processors. | ||
218 | |||
219 | If in doubt, say N. | ||
220 | |||
221 | comment "shared options" | ||
222 | |||
223 | config X86_ACPI_CPUFREQ_PROC_INTF | ||
224 | bool "/proc/acpi/processor/../performance interface (deprecated)" | ||
225 | depends on PROC_FS | ||
226 | depends on X86_ACPI_CPUFREQ || X86_POWERNOW_K7_ACPI || X86_POWERNOW_K8_ACPI | ||
227 | help | ||
228 | This enables the deprecated /proc/acpi/processor/../performance | ||
229 | interface. While it is helpful for debugging, the generic, | ||
230 | cross-architecture cpufreq interfaces should be used. | ||
231 | |||
232 | If in doubt, say N. | ||
233 | |||
234 | config X86_SPEEDSTEP_LIB | ||
235 | tristate | ||
236 | default X86_SPEEDSTEP_ICH || X86_SPEEDSTEP_SMI || X86_P4_CLOCKMOD | ||
237 | |||
238 | config X86_SPEEDSTEP_RELAXED_CAP_CHECK | ||
239 | bool "Relaxed speedstep capability checks" | ||
240 | depends on (X86_SPEEDSTEP_SMI || X86_SPEEDSTEP_ICH) | ||
241 | help | ||
242 | Don't perform all checks for a speedstep capable system which would | ||
243 | normally be done. Some ancient or strange systems, though speedstep | ||
244 | capable, don't always indicate that they are speedstep capable. This | ||
245 | option lets the probing code bypass some of those checks if the | ||
246 | parameter "relaxed_check=1" is passed to the module. | ||
247 | |||
248 | endif # CPU_FREQ | ||
249 | |||
250 | endmenu | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/Makefile b/arch/x86/kernel/cpu/cpufreq/Makefile
new file mode 100644
index 000000000000..560f7760dae5
--- /dev/null
+++ b/arch/x86/kernel/cpu/cpufreq/Makefile
@@ -0,0 +1,16 @@
1 | obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o | ||
2 | obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o | ||
3 | obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o | ||
4 | obj-$(CONFIG_X86_LONGHAUL) += longhaul.o | ||
5 | obj-$(CONFIG_X86_E_POWERSAVER) += e_powersaver.o | ||
6 | obj-$(CONFIG_ELAN_CPUFREQ) += elanfreq.o | ||
7 | obj-$(CONFIG_SC520_CPUFREQ) += sc520_freq.o | ||
8 | obj-$(CONFIG_X86_LONGRUN) += longrun.o | ||
9 | obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o | ||
10 | obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o | ||
11 | obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o | ||
12 | obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o | ||
13 | obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o | ||
14 | obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o | ||
15 | obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o | ||
16 | obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
new file mode 100644
index 000000000000..705e13a30781
--- /dev/null
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -0,0 +1,799 @@
1 | /* | ||
2 | * acpi-cpufreq.c - ACPI Processor P-States Driver ($Revision: 1.4 $) | ||
3 | * | ||
4 | * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> | ||
5 | * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> | ||
6 | * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de> | ||
7 | * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com> | ||
8 | * | ||
9 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License as published by | ||
13 | * the Free Software Foundation; either version 2 of the License, or (at | ||
14 | * your option) any later version. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, but | ||
17 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
19 | * General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License along | ||
22 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
23 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
24 | * | ||
25 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
26 | */ | ||
27 | |||
28 | #include <linux/kernel.h> | ||
29 | #include <linux/module.h> | ||
30 | #include <linux/init.h> | ||
31 | #include <linux/smp.h> | ||
32 | #include <linux/sched.h> | ||
33 | #include <linux/cpufreq.h> | ||
34 | #include <linux/compiler.h> | ||
35 | #include <linux/dmi.h> | ||
36 | |||
37 | #include <linux/acpi.h> | ||
38 | #include <acpi/processor.h> | ||
39 | |||
40 | #include <asm/io.h> | ||
41 | #include <asm/msr.h> | ||
42 | #include <asm/processor.h> | ||
43 | #include <asm/cpufeature.h> | ||
44 | #include <asm/delay.h> | ||
45 | #include <asm/uaccess.h> | ||
46 | |||
47 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg) | ||
48 | |||
49 | MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski"); | ||
50 | MODULE_DESCRIPTION("ACPI Processor P-States Driver"); | ||
51 | MODULE_LICENSE("GPL"); | ||
52 | |||
53 | enum { | ||
54 | UNDEFINED_CAPABLE = 0, | ||
55 | SYSTEM_INTEL_MSR_CAPABLE, | ||
56 | SYSTEM_IO_CAPABLE, | ||
57 | }; | ||
58 | |||
59 | #define INTEL_MSR_RANGE (0xffff) | ||
60 | #define CPUID_6_ECX_APERFMPERF_CAPABILITY (0x1) | ||
61 | |||
62 | struct acpi_cpufreq_data { | ||
63 | struct acpi_processor_performance *acpi_data; | ||
64 | struct cpufreq_frequency_table *freq_table; | ||
65 | unsigned int max_freq; | ||
66 | unsigned int resume; | ||
67 | unsigned int cpu_feature; | ||
68 | }; | ||
69 | |||
70 | static struct acpi_cpufreq_data *drv_data[NR_CPUS]; | ||
71 | /* acpi_perf_data is a pointer to percpu data. */ | ||
72 | static struct acpi_processor_performance *acpi_perf_data; | ||
73 | |||
74 | static struct cpufreq_driver acpi_cpufreq_driver; | ||
75 | |||
76 | static unsigned int acpi_pstate_strict; | ||
77 | |||
78 | static int check_est_cpu(unsigned int cpuid) | ||
79 | { | ||
80 | struct cpuinfo_x86 *cpu = &cpu_data[cpuid]; | ||
81 | |||
82 | if (cpu->x86_vendor != X86_VENDOR_INTEL || | ||
83 | !cpu_has(cpu, X86_FEATURE_EST)) | ||
84 | return 0; | ||
85 | |||
86 | return 1; | ||
87 | } | ||
88 | |||
89 | static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data) | ||
90 | { | ||
91 | struct acpi_processor_performance *perf; | ||
92 | int i; | ||
93 | |||
94 | perf = data->acpi_data; | ||
95 | |||
96 | for (i=0; i<perf->state_count; i++) { | ||
97 | if (value == perf->states[i].status) | ||
98 | return data->freq_table[i].frequency; | ||
99 | } | ||
100 | return 0; | ||
101 | } | ||
102 | |||
103 | static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data) | ||
104 | { | ||
105 | int i; | ||
106 | struct acpi_processor_performance *perf; | ||
107 | |||
108 | msr &= INTEL_MSR_RANGE; | ||
109 | perf = data->acpi_data; | ||
110 | |||
111 | for (i=0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { | ||
112 | if (msr == perf->states[data->freq_table[i].index].status) | ||
113 | return data->freq_table[i].frequency; | ||
114 | } | ||
115 | return data->freq_table[0].frequency; | ||
116 | } | ||
117 | |||
118 | static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data) | ||
119 | { | ||
120 | switch (data->cpu_feature) { | ||
121 | case SYSTEM_INTEL_MSR_CAPABLE: | ||
122 | return extract_msr(val, data); | ||
123 | case SYSTEM_IO_CAPABLE: | ||
124 | return extract_io(val, data); | ||
125 | default: | ||
126 | return 0; | ||
127 | } | ||
128 | } | ||
129 | |||
130 | struct msr_addr { | ||
131 | u32 reg; | ||
132 | }; | ||
133 | |||
134 | struct io_addr { | ||
135 | u16 port; | ||
136 | u8 bit_width; | ||
137 | }; | ||
138 | |||
139 | typedef union { | ||
140 | struct msr_addr msr; | ||
141 | struct io_addr io; | ||
142 | } drv_addr_union; | ||
143 | |||
144 | struct drv_cmd { | ||
145 | unsigned int type; | ||
146 | cpumask_t mask; | ||
147 | drv_addr_union addr; | ||
148 | u32 val; | ||
149 | }; | ||
150 | |||
151 | static void do_drv_read(struct drv_cmd *cmd) | ||
152 | { | ||
153 | u32 h; | ||
154 | |||
155 | switch (cmd->type) { | ||
156 | case SYSTEM_INTEL_MSR_CAPABLE: | ||
157 | rdmsr(cmd->addr.msr.reg, cmd->val, h); | ||
158 | break; | ||
159 | case SYSTEM_IO_CAPABLE: | ||
160 | acpi_os_read_port((acpi_io_address)cmd->addr.io.port, | ||
161 | &cmd->val, | ||
162 | (u32)cmd->addr.io.bit_width); | ||
163 | break; | ||
164 | default: | ||
165 | break; | ||
166 | } | ||
167 | } | ||
168 | |||
169 | static void do_drv_write(struct drv_cmd *cmd) | ||
170 | { | ||
171 | u32 lo, hi; | ||
172 | |||
173 | switch (cmd->type) { | ||
174 | case SYSTEM_INTEL_MSR_CAPABLE: | ||
175 | rdmsr(cmd->addr.msr.reg, lo, hi); | ||
176 | lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE); | ||
177 | wrmsr(cmd->addr.msr.reg, lo, hi); | ||
178 | break; | ||
179 | case SYSTEM_IO_CAPABLE: | ||
180 | acpi_os_write_port((acpi_io_address)cmd->addr.io.port, | ||
181 | cmd->val, | ||
182 | (u32)cmd->addr.io.bit_width); | ||
183 | break; | ||
184 | default: | ||
185 | break; | ||
186 | } | ||
187 | } | ||
188 | |||
189 | static void drv_read(struct drv_cmd *cmd) | ||
190 | { | ||
191 | cpumask_t saved_mask = current->cpus_allowed; | ||
192 | cmd->val = 0; | ||
193 | |||
194 | set_cpus_allowed(current, cmd->mask); | ||
195 | do_drv_read(cmd); | ||
196 | set_cpus_allowed(current, saved_mask); | ||
197 | } | ||
198 | |||
199 | static void drv_write(struct drv_cmd *cmd) | ||
200 | { | ||
201 | cpumask_t saved_mask = current->cpus_allowed; | ||
202 | unsigned int i; | ||
203 | |||
204 | for_each_cpu_mask(i, cmd->mask) { | ||
205 | set_cpus_allowed(current, cpumask_of_cpu(i)); | ||
206 | do_drv_write(cmd); | ||
207 | } | ||
208 | |||
209 | set_cpus_allowed(current, saved_mask); | ||
210 | return; | ||
211 | } | ||
212 | |||
213 | static u32 get_cur_val(cpumask_t mask) | ||
214 | { | ||
215 | struct acpi_processor_performance *perf; | ||
216 | struct drv_cmd cmd; | ||
217 | |||
218 | if (unlikely(cpus_empty(mask))) | ||
219 | return 0; | ||
220 | |||
221 | switch (drv_data[first_cpu(mask)]->cpu_feature) { | ||
222 | case SYSTEM_INTEL_MSR_CAPABLE: | ||
223 | cmd.type = SYSTEM_INTEL_MSR_CAPABLE; | ||
224 | cmd.addr.msr.reg = MSR_IA32_PERF_STATUS; | ||
225 | break; | ||
226 | case SYSTEM_IO_CAPABLE: | ||
227 | cmd.type = SYSTEM_IO_CAPABLE; | ||
228 | perf = drv_data[first_cpu(mask)]->acpi_data; | ||
229 | cmd.addr.io.port = perf->control_register.address; | ||
230 | cmd.addr.io.bit_width = perf->control_register.bit_width; | ||
231 | break; | ||
232 | default: | ||
233 | return 0; | ||
234 | } | ||
235 | |||
236 | cmd.mask = mask; | ||
237 | |||
238 | drv_read(&cmd); | ||
239 | |||
240 | dprintk("get_cur_val = %u\n", cmd.val); | ||
241 | |||
242 | return cmd.val; | ||
243 | } | ||
244 | |||
245 | /* | ||
246 | * Return the measured active (C0) frequency on this CPU since last call | ||
247 | * to this function. | ||
248 | * Input: cpu number | ||
249 | * Return: Average CPU frequency in terms of max frequency (zero on error) | ||
250 | * | ||
251 | * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance | ||
252 | * over a period of time, while CPU is in C0 state. | ||
253 | * IA32_MPERF counts at the rate of max advertised frequency | ||
254 | * IA32_APERF counts at the rate of actual CPU frequency | ||
255 | * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and | ||
256 | * no meaning should be associated with absolute values of these MSRs. | ||
257 | */ | ||
258 | static unsigned int get_measured_perf(unsigned int cpu) | ||
259 | { | ||
260 | union { | ||
261 | struct { | ||
262 | u32 lo; | ||
263 | u32 hi; | ||
264 | } split; | ||
265 | u64 whole; | ||
266 | } aperf_cur, mperf_cur; | ||
267 | |||
268 | cpumask_t saved_mask; | ||
269 | unsigned int perf_percent; | ||
270 | unsigned int retval; | ||
271 | |||
272 | saved_mask = current->cpus_allowed; | ||
273 | set_cpus_allowed(current, cpumask_of_cpu(cpu)); | ||
274 | if (get_cpu() != cpu) { | ||
275 | /* We were not able to run on requested processor */ | ||
276 | put_cpu(); | ||
277 | return 0; | ||
278 | } | ||
279 | |||
280 | rdmsr(MSR_IA32_APERF, aperf_cur.split.lo, aperf_cur.split.hi); | ||
281 | rdmsr(MSR_IA32_MPERF, mperf_cur.split.lo, mperf_cur.split.hi); | ||
282 | |||
283 | wrmsr(MSR_IA32_APERF, 0,0); | ||
284 | wrmsr(MSR_IA32_MPERF, 0,0); | ||
285 | |||
286 | #ifdef __i386__ | ||
287 | /* | ||
288 | * We don't want to do a 64 bit divide on a 32 bit kernel. | ||
289 | * Get an approximate value. Return failure in case we cannot get | ||
290 | * an approximate value. | ||
291 | */ | ||
292 | if (unlikely(aperf_cur.split.hi || mperf_cur.split.hi)) { | ||
293 | int shift_count; | ||
294 | u32 h; | ||
295 | |||
296 | h = max_t(u32, aperf_cur.split.hi, mperf_cur.split.hi); | ||
297 | shift_count = fls(h); | ||
298 | |||
299 | aperf_cur.whole >>= shift_count; | ||
300 | mperf_cur.whole >>= shift_count; | ||
301 | } | ||
302 | |||
303 | if (((unsigned long)(-1) / 100) < aperf_cur.split.lo) { | ||
304 | int shift_count = 7; | ||
305 | aperf_cur.split.lo >>= shift_count; | ||
306 | mperf_cur.split.lo >>= shift_count; | ||
307 | } | ||
308 | |||
309 | if (aperf_cur.split.lo && mperf_cur.split.lo) | ||
310 | perf_percent = (aperf_cur.split.lo * 100) / mperf_cur.split.lo; | ||
311 | else | ||
312 | perf_percent = 0; | ||
313 | |||
314 | #else | ||
315 | if (unlikely(((unsigned long)(-1) / 100) < aperf_cur.whole)) { | ||
316 | int shift_count = 7; | ||
317 | aperf_cur.whole >>= shift_count; | ||
318 | mperf_cur.whole >>= shift_count; | ||
319 | } | ||
320 | |||
321 | if (aperf_cur.whole && mperf_cur.whole) | ||
322 | perf_percent = (aperf_cur.whole * 100) / mperf_cur.whole; | ||
323 | else | ||
324 | perf_percent = 0; | ||
325 | |||
326 | #endif | ||
327 | |||
328 | retval = drv_data[cpu]->max_freq * perf_percent / 100; | ||
329 | |||
330 | put_cpu(); | ||
331 | set_cpus_allowed(current, saved_mask); | ||
332 | |||
333 | dprintk("cpu %d: performance percent %d\n", cpu, perf_percent); | ||
334 | return retval; | ||
335 | } | ||
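The comment block before get_measured_perf() notes that only the APERF/MPERF ratio is architecturally meaningful. A minimal, hypothetical C sketch of that arithmetic follows; the helper name and the sample numbers are illustrative, not part of the patch.

/*
 * Illustrative sketch only: the estimate is max_freq scaled by the ratio of
 * the two counter deltas measured over the same interval.
 */
static unsigned int estimate_khz(unsigned long long aperf_delta,
				 unsigned long long mperf_delta,
				 unsigned int max_khz)
{
	unsigned int percent;

	if (!aperf_delta || !mperf_delta)
		return 0;	/* counters did not advance: report failure */

	percent = (unsigned int)(aperf_delta * 100 / mperf_delta);
	/* e.g. aperf_delta = 750, mperf_delta = 1000, max_khz = 2000000:
	 * 75% of 2000000 kHz = 1500000 kHz */
	return max_khz / 100 * percent;
}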
336 | |||
337 | static unsigned int get_cur_freq_on_cpu(unsigned int cpu) | ||
338 | { | ||
339 | struct acpi_cpufreq_data *data = drv_data[cpu]; | ||
340 | unsigned int freq; | ||
341 | |||
342 | dprintk("get_cur_freq_on_cpu (%d)\n", cpu); | ||
343 | |||
344 | if (unlikely(data == NULL || | ||
345 | data->acpi_data == NULL || data->freq_table == NULL)) { | ||
346 | return 0; | ||
347 | } | ||
348 | |||
349 | freq = extract_freq(get_cur_val(cpumask_of_cpu(cpu)), data); | ||
350 | dprintk("cur freq = %u\n", freq); | ||
351 | |||
352 | return freq; | ||
353 | } | ||
354 | |||
355 | static unsigned int check_freqs(cpumask_t mask, unsigned int freq, | ||
356 | struct acpi_cpufreq_data *data) | ||
357 | { | ||
358 | unsigned int cur_freq; | ||
359 | unsigned int i; | ||
360 | |||
361 | for (i=0; i<100; i++) { | ||
362 | cur_freq = extract_freq(get_cur_val(mask), data); | ||
363 | if (cur_freq == freq) | ||
364 | return 1; | ||
365 | udelay(10); | ||
366 | } | ||
367 | return 0; | ||
368 | } | ||
369 | |||
370 | static int acpi_cpufreq_target(struct cpufreq_policy *policy, | ||
371 | unsigned int target_freq, unsigned int relation) | ||
372 | { | ||
373 | struct acpi_cpufreq_data *data = drv_data[policy->cpu]; | ||
374 | struct acpi_processor_performance *perf; | ||
375 | struct cpufreq_freqs freqs; | ||
376 | cpumask_t online_policy_cpus; | ||
377 | struct drv_cmd cmd; | ||
378 | unsigned int next_state = 0; /* Index into freq_table */ | ||
379 | unsigned int next_perf_state = 0; /* Index into perf table */ | ||
380 | unsigned int i; | ||
381 | int result = 0; | ||
382 | |||
383 | dprintk("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu); | ||
384 | |||
385 | if (unlikely(data == NULL || | ||
386 | data->acpi_data == NULL || data->freq_table == NULL)) { | ||
387 | return -ENODEV; | ||
388 | } | ||
389 | |||
390 | perf = data->acpi_data; | ||
391 | result = cpufreq_frequency_table_target(policy, | ||
392 | data->freq_table, | ||
393 | target_freq, | ||
394 | relation, &next_state); | ||
395 | if (unlikely(result)) | ||
396 | return -ENODEV; | ||
397 | |||
398 | #ifdef CONFIG_HOTPLUG_CPU | ||
399 | /* cpufreq holds the hotplug lock, so we are safe from here on */ | ||
400 | cpus_and(online_policy_cpus, cpu_online_map, policy->cpus); | ||
401 | #else | ||
402 | online_policy_cpus = policy->cpus; | ||
403 | #endif | ||
404 | |||
405 | next_perf_state = data->freq_table[next_state].index; | ||
406 | if (perf->state == next_perf_state) { | ||
407 | if (unlikely(data->resume)) { | ||
408 | dprintk("Called after resume, resetting to P%d\n", | ||
409 | next_perf_state); | ||
410 | data->resume = 0; | ||
411 | } else { | ||
412 | dprintk("Already at target state (P%d)\n", | ||
413 | next_perf_state); | ||
414 | return 0; | ||
415 | } | ||
416 | } | ||
417 | |||
418 | switch (data->cpu_feature) { | ||
419 | case SYSTEM_INTEL_MSR_CAPABLE: | ||
420 | cmd.type = SYSTEM_INTEL_MSR_CAPABLE; | ||
421 | cmd.addr.msr.reg = MSR_IA32_PERF_CTL; | ||
422 | cmd.val = (u32) perf->states[next_perf_state].control; | ||
423 | break; | ||
424 | case SYSTEM_IO_CAPABLE: | ||
425 | cmd.type = SYSTEM_IO_CAPABLE; | ||
426 | cmd.addr.io.port = perf->control_register.address; | ||
427 | cmd.addr.io.bit_width = perf->control_register.bit_width; | ||
428 | cmd.val = (u32) perf->states[next_perf_state].control; | ||
429 | break; | ||
430 | default: | ||
431 | return -ENODEV; | ||
432 | } | ||
433 | |||
434 | cpus_clear(cmd.mask); | ||
435 | |||
436 | if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY) | ||
437 | cmd.mask = online_policy_cpus; | ||
438 | else | ||
439 | cpu_set(policy->cpu, cmd.mask); | ||
440 | |||
441 | freqs.old = perf->states[perf->state].core_frequency * 1000; | ||
442 | freqs.new = data->freq_table[next_state].frequency; | ||
443 | for_each_cpu_mask(i, cmd.mask) { | ||
444 | freqs.cpu = i; | ||
445 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
446 | } | ||
447 | |||
448 | drv_write(&cmd); | ||
449 | |||
450 | if (acpi_pstate_strict) { | ||
451 | if (!check_freqs(cmd.mask, freqs.new, data)) { | ||
452 | dprintk("acpi_cpufreq_target failed (%d)\n", | ||
453 | policy->cpu); | ||
454 | return -EAGAIN; | ||
455 | } | ||
456 | } | ||
457 | |||
458 | for_each_cpu_mask(i, cmd.mask) { | ||
459 | freqs.cpu = i; | ||
460 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
461 | } | ||
462 | perf->state = next_perf_state; | ||
463 | |||
464 | return result; | ||
465 | } | ||
466 | |||
467 | static int acpi_cpufreq_verify(struct cpufreq_policy *policy) | ||
468 | { | ||
469 | struct acpi_cpufreq_data *data = drv_data[policy->cpu]; | ||
470 | |||
471 | dprintk("acpi_cpufreq_verify\n"); | ||
472 | |||
473 | return cpufreq_frequency_table_verify(policy, data->freq_table); | ||
474 | } | ||
475 | |||
476 | static unsigned long | ||
477 | acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu) | ||
478 | { | ||
479 | struct acpi_processor_performance *perf = data->acpi_data; | ||
480 | |||
481 | if (cpu_khz) { | ||
482 | /* search the closest match to cpu_khz */ | ||
483 | unsigned int i; | ||
484 | unsigned long freq; | ||
485 | unsigned long freqn = perf->states[0].core_frequency * 1000; | ||
486 | |||
487 | for (i=0; i<(perf->state_count-1); i++) { | ||
488 | freq = freqn; | ||
489 | freqn = perf->states[i+1].core_frequency * 1000; | ||
490 | if ((2 * cpu_khz) > (freqn + freq)) { | ||
491 | perf->state = i; | ||
492 | return freq; | ||
493 | } | ||
494 | } | ||
495 | perf->state = perf->state_count-1; | ||
496 | return freqn; | ||
497 | } else { | ||
498 | /* assume CPU is at P0... */ | ||
499 | perf->state = 0; | ||
500 | return perf->states[0].core_frequency * 1000; | ||
501 | } | ||
502 | } | ||
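For reference, a hypothetical walk-through of the midpoint test used in acpi_cpufreq_guess_freq() above; the frequencies are made up for illustration.

/*
 * Example (illustrative values only): with P-states at 1600000, 1400000 and
 * 1000000 kHz and cpu_khz = 1450000, the loop first tests the
 * 1600000/1400000 midpoint (1500000 kHz) and fails, then the
 * 1400000/1000000 midpoint (1200000 kHz) and succeeds, so P1 (1400000 kHz)
 * is reported as the closest match to the measured clock.
 */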
503 | |||
504 | /* | ||
505 | * acpi_cpufreq_early_init - initialize ACPI P-States library | ||
506 | * | ||
507 | * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c) | ||
508 | * in order to determine correct frequency and voltage pairings. We can | ||
509 | * do _PDC and _PSD and find out the processor dependency for the | ||
510 | * actual init that will happen later... | ||
511 | */ | ||
512 | static int __init acpi_cpufreq_early_init(void) | ||
513 | { | ||
514 | dprintk("acpi_cpufreq_early_init\n"); | ||
515 | |||
516 | acpi_perf_data = alloc_percpu(struct acpi_processor_performance); | ||
517 | if (!acpi_perf_data) { | ||
518 | dprintk("Memory allocation error for acpi_perf_data.\n"); | ||
519 | return -ENOMEM; | ||
520 | } | ||
521 | |||
522 | /* Do initialization in ACPI core */ | ||
523 | acpi_processor_preregister_performance(acpi_perf_data); | ||
524 | return 0; | ||
525 | } | ||
526 | |||
527 | #ifdef CONFIG_SMP | ||
528 | /* | ||
529 | * Some BIOSes do SW_ANY coordination internally, either setting it up in | ||
530 | * hardware or handling it in firmware, without informing the OS. If this | ||
531 | * goes undetected, it has the side effect of making the CPU run at a | ||
532 | * different speed than the OS intended. Detect it and handle it cleanly. | ||
533 | */ | ||
534 | static int bios_with_sw_any_bug; | ||
535 | |||
536 | static int sw_any_bug_found(struct dmi_system_id *d) | ||
537 | { | ||
538 | bios_with_sw_any_bug = 1; | ||
539 | return 0; | ||
540 | } | ||
541 | |||
542 | static struct dmi_system_id sw_any_bug_dmi_table[] = { | ||
543 | { | ||
544 | .callback = sw_any_bug_found, | ||
545 | .ident = "Supermicro Server X6DLP", | ||
546 | .matches = { | ||
547 | DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"), | ||
548 | DMI_MATCH(DMI_BIOS_VERSION, "080010"), | ||
549 | DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"), | ||
550 | }, | ||
551 | }, | ||
552 | { } | ||
553 | }; | ||
554 | #endif | ||
555 | |||
556 | static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | ||
557 | { | ||
558 | unsigned int i; | ||
559 | unsigned int valid_states = 0; | ||
560 | unsigned int cpu = policy->cpu; | ||
561 | struct acpi_cpufreq_data *data; | ||
562 | unsigned int result = 0; | ||
563 | struct cpuinfo_x86 *c = &cpu_data[policy->cpu]; | ||
564 | struct acpi_processor_performance *perf; | ||
565 | |||
566 | dprintk("acpi_cpufreq_cpu_init\n"); | ||
567 | |||
568 | data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL); | ||
569 | if (!data) | ||
570 | return -ENOMEM; | ||
571 | |||
572 | data->acpi_data = percpu_ptr(acpi_perf_data, cpu); | ||
573 | drv_data[cpu] = data; | ||
574 | |||
575 | if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) | ||
576 | acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS; | ||
577 | |||
578 | result = acpi_processor_register_performance(data->acpi_data, cpu); | ||
579 | if (result) | ||
580 | goto err_free; | ||
581 | |||
582 | perf = data->acpi_data; | ||
583 | policy->shared_type = perf->shared_type; | ||
584 | |||
585 | /* | ||
586 | * Will let policy->cpus know about dependency only when software | ||
587 | * coordination is required. | ||
588 | */ | ||
589 | if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || | ||
590 | policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { | ||
591 | policy->cpus = perf->shared_cpu_map; | ||
592 | } | ||
593 | |||
594 | #ifdef CONFIG_SMP | ||
595 | dmi_check_system(sw_any_bug_dmi_table); | ||
596 | if (bios_with_sw_any_bug && cpus_weight(policy->cpus) == 1) { | ||
597 | policy->shared_type = CPUFREQ_SHARED_TYPE_ALL; | ||
598 | policy->cpus = cpu_core_map[cpu]; | ||
599 | } | ||
600 | #endif | ||
601 | |||
602 | /* capability check */ | ||
603 | if (perf->state_count <= 1) { | ||
604 | dprintk("No P-States\n"); | ||
605 | result = -ENODEV; | ||
606 | goto err_unreg; | ||
607 | } | ||
608 | |||
609 | if (perf->control_register.space_id != perf->status_register.space_id) { | ||
610 | result = -ENODEV; | ||
611 | goto err_unreg; | ||
612 | } | ||
613 | |||
614 | switch (perf->control_register.space_id) { | ||
615 | case ACPI_ADR_SPACE_SYSTEM_IO: | ||
616 | dprintk("SYSTEM IO addr space\n"); | ||
617 | data->cpu_feature = SYSTEM_IO_CAPABLE; | ||
618 | break; | ||
619 | case ACPI_ADR_SPACE_FIXED_HARDWARE: | ||
620 | dprintk("HARDWARE addr space\n"); | ||
621 | if (!check_est_cpu(cpu)) { | ||
622 | result = -ENODEV; | ||
623 | goto err_unreg; | ||
624 | } | ||
625 | data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE; | ||
626 | break; | ||
627 | default: | ||
628 | dprintk("Unknown addr space %d\n", | ||
629 | (u32) (perf->control_register.space_id)); | ||
630 | result = -ENODEV; | ||
631 | goto err_unreg; | ||
632 | } | ||
633 | |||
634 | data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * | ||
635 | (perf->state_count+1), GFP_KERNEL); | ||
636 | if (!data->freq_table) { | ||
637 | result = -ENOMEM; | ||
638 | goto err_unreg; | ||
639 | } | ||
640 | |||
641 | /* detect transition latency */ | ||
642 | policy->cpuinfo.transition_latency = 0; | ||
643 | for (i=0; i<perf->state_count; i++) { | ||
644 | if ((perf->states[i].transition_latency * 1000) > | ||
645 | policy->cpuinfo.transition_latency) | ||
646 | policy->cpuinfo.transition_latency = | ||
647 | perf->states[i].transition_latency * 1000; | ||
648 | } | ||
649 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
650 | |||
651 | data->max_freq = perf->states[0].core_frequency * 1000; | ||
652 | /* table init */ | ||
653 | for (i=0; i<perf->state_count; i++) { | ||
654 | if (i>0 && perf->states[i].core_frequency >= | ||
655 | data->freq_table[valid_states-1].frequency / 1000) | ||
656 | continue; | ||
657 | |||
658 | data->freq_table[valid_states].index = i; | ||
659 | data->freq_table[valid_states].frequency = | ||
660 | perf->states[i].core_frequency * 1000; | ||
661 | valid_states++; | ||
662 | } | ||
663 | data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END; | ||
664 | perf->state = 0; | ||
665 | |||
666 | result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); | ||
667 | if (result) | ||
668 | goto err_freqfree; | ||
669 | |||
670 | switch (perf->control_register.space_id) { | ||
671 | case ACPI_ADR_SPACE_SYSTEM_IO: | ||
672 | /* Current speed is unknown and not detectable by IO port */ | ||
673 | policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu); | ||
674 | break; | ||
675 | case ACPI_ADR_SPACE_FIXED_HARDWARE: | ||
676 | acpi_cpufreq_driver.get = get_cur_freq_on_cpu; | ||
677 | policy->cur = get_cur_freq_on_cpu(cpu); | ||
678 | break; | ||
679 | default: | ||
680 | break; | ||
681 | } | ||
682 | |||
683 | /* notify BIOS that we exist */ | ||
684 | acpi_processor_notify_smm(THIS_MODULE); | ||
685 | |||
686 | /* Check for APERF/MPERF support in hardware */ | ||
687 | if (c->x86_vendor == X86_VENDOR_INTEL && c->cpuid_level >= 6) { | ||
688 | unsigned int ecx; | ||
689 | ecx = cpuid_ecx(6); | ||
690 | if (ecx & CPUID_6_ECX_APERFMPERF_CAPABILITY) | ||
691 | acpi_cpufreq_driver.getavg = get_measured_perf; | ||
692 | } | ||
693 | |||
694 | dprintk("CPU%u - ACPI performance management activated.\n", cpu); | ||
695 | for (i = 0; i < perf->state_count; i++) | ||
696 | dprintk(" %cP%d: %d MHz, %d mW, %d uS\n", | ||
697 | (i == perf->state ? '*' : ' '), i, | ||
698 | (u32) perf->states[i].core_frequency, | ||
699 | (u32) perf->states[i].power, | ||
700 | (u32) perf->states[i].transition_latency); | ||
701 | |||
702 | cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu); | ||
703 | |||
704 | /* | ||
705 | * the first call to ->target() should result in us actually | ||
706 | * writing something to the appropriate registers. | ||
707 | */ | ||
708 | data->resume = 1; | ||
709 | |||
710 | return result; | ||
711 | |||
712 | err_freqfree: | ||
713 | kfree(data->freq_table); | ||
714 | err_unreg: | ||
715 | acpi_processor_unregister_performance(perf, cpu); | ||
716 | err_free: | ||
717 | kfree(data); | ||
718 | drv_data[cpu] = NULL; | ||
719 | |||
720 | return result; | ||
721 | } | ||
722 | |||
723 | static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) | ||
724 | { | ||
725 | struct acpi_cpufreq_data *data = drv_data[policy->cpu]; | ||
726 | |||
727 | dprintk("acpi_cpufreq_cpu_exit\n"); | ||
728 | |||
729 | if (data) { | ||
730 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
731 | drv_data[policy->cpu] = NULL; | ||
732 | acpi_processor_unregister_performance(data->acpi_data, | ||
733 | policy->cpu); | ||
734 | kfree(data); | ||
735 | } | ||
736 | |||
737 | return 0; | ||
738 | } | ||
739 | |||
740 | static int acpi_cpufreq_resume(struct cpufreq_policy *policy) | ||
741 | { | ||
742 | struct acpi_cpufreq_data *data = drv_data[policy->cpu]; | ||
743 | |||
744 | dprintk("acpi_cpufreq_resume\n"); | ||
745 | |||
746 | data->resume = 1; | ||
747 | |||
748 | return 0; | ||
749 | } | ||
750 | |||
751 | static struct freq_attr *acpi_cpufreq_attr[] = { | ||
752 | &cpufreq_freq_attr_scaling_available_freqs, | ||
753 | NULL, | ||
754 | }; | ||
755 | |||
756 | static struct cpufreq_driver acpi_cpufreq_driver = { | ||
757 | .verify = acpi_cpufreq_verify, | ||
758 | .target = acpi_cpufreq_target, | ||
759 | .init = acpi_cpufreq_cpu_init, | ||
760 | .exit = acpi_cpufreq_cpu_exit, | ||
761 | .resume = acpi_cpufreq_resume, | ||
762 | .name = "acpi-cpufreq", | ||
763 | .owner = THIS_MODULE, | ||
764 | .attr = acpi_cpufreq_attr, | ||
765 | }; | ||
766 | |||
767 | static int __init acpi_cpufreq_init(void) | ||
768 | { | ||
769 | int ret; | ||
770 | |||
771 | dprintk("acpi_cpufreq_init\n"); | ||
772 | |||
773 | ret = acpi_cpufreq_early_init(); | ||
774 | if (ret) | ||
775 | return ret; | ||
776 | |||
777 | return cpufreq_register_driver(&acpi_cpufreq_driver); | ||
778 | } | ||
779 | |||
780 | static void __exit acpi_cpufreq_exit(void) | ||
781 | { | ||
782 | dprintk("acpi_cpufreq_exit\n"); | ||
783 | |||
784 | cpufreq_unregister_driver(&acpi_cpufreq_driver); | ||
785 | |||
786 | free_percpu(acpi_perf_data); | ||
787 | |||
788 | return; | ||
789 | } | ||
790 | |||
791 | module_param(acpi_pstate_strict, uint, 0644); | ||
792 | MODULE_PARM_DESC(acpi_pstate_strict, | ||
793 | "value 0 or non-zero. non-zero -> strict ACPI checks are " | ||
794 | "performed during frequency changes."); | ||
795 | |||
796 | late_initcall(acpi_cpufreq_init); | ||
797 | module_exit(acpi_cpufreq_exit); | ||
798 | |||
799 | MODULE_ALIAS("acpi"); | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c
new file mode 100644
index 000000000000..66acd5039918
--- /dev/null
+++ b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c
@@ -0,0 +1,441 @@
1 | /* | ||
2 | * (C) 2004-2006 Sebastian Witt <se.witt@gmx.net> | ||
3 | * | ||
4 | * Licensed under the terms of the GNU GPL License version 2. | ||
5 | * Based upon reverse engineered information | ||
6 | * | ||
7 | * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/moduleparam.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/cpufreq.h> | ||
15 | #include <linux/pci.h> | ||
16 | #include <linux/delay.h> | ||
17 | |||
18 | #define NFORCE2_XTAL 25 | ||
19 | #define NFORCE2_BOOTFSB 0x48 | ||
20 | #define NFORCE2_PLLENABLE 0xa8 | ||
21 | #define NFORCE2_PLLREG 0xa4 | ||
22 | #define NFORCE2_PLLADR 0xa0 | ||
23 | #define NFORCE2_PLL(mul, div) (0x100000 | (mul << 8) | div) | ||
24 | |||
25 | #define NFORCE2_MIN_FSB 50 | ||
26 | #define NFORCE2_SAFE_DISTANCE 50 | ||
27 | |||
28 | /* Delay in ms between FSB changes */ | ||
29 | //#define NFORCE2_DELAY 10 | ||
30 | |||
31 | /* nforce2_chipset: | ||
32 | * FSB is changed using the chipset | ||
33 | */ | ||
34 | static struct pci_dev *nforce2_chipset_dev; | ||
35 | |||
36 | /* fid: | ||
37 | * multiplier * 10 | ||
38 | */ | ||
39 | static int fid = 0; | ||
40 | |||
41 | /* min_fsb, max_fsb: | ||
42 | * minimum and maximum FSB (= FSB at boot time) | ||
43 | */ | ||
44 | static int min_fsb = 0; | ||
45 | static int max_fsb = 0; | ||
46 | |||
47 | MODULE_AUTHOR("Sebastian Witt <se.witt@gmx.net>"); | ||
48 | MODULE_DESCRIPTION("nForce2 FSB changing cpufreq driver"); | ||
49 | MODULE_LICENSE("GPL"); | ||
50 | |||
51 | module_param(fid, int, 0444); | ||
52 | module_param(min_fsb, int, 0444); | ||
53 | |||
54 | MODULE_PARM_DESC(fid, "CPU multiplier to use (11.5 = 115)"); | ||
55 | MODULE_PARM_DESC(min_fsb, | ||
56 | "Minimum FSB to use, if not defined: current FSB - 50"); | ||
57 | |||
58 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "cpufreq-nforce2", msg) | ||
59 | |||
60 | /** | ||
61 | * nforce2_calc_fsb - calculate FSB | ||
62 | * @pll: PLL value | ||
63 | * | ||
64 | * Calculates FSB from PLL value | ||
65 | */ | ||
66 | static int nforce2_calc_fsb(int pll) | ||
67 | { | ||
68 | unsigned char mul, div; | ||
69 | |||
70 | mul = (pll >> 8) & 0xff; | ||
71 | div = pll & 0xff; | ||
72 | |||
73 | if (div > 0) | ||
74 | return NFORCE2_XTAL * mul / div; | ||
75 | |||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | /** | ||
80 | * nforce2_calc_pll - calculate PLL value | ||
81 | * @fsb: FSB | ||
82 | * | ||
83 | * Calculate PLL value for given FSB | ||
84 | */ | ||
85 | static int nforce2_calc_pll(unsigned int fsb) | ||
86 | { | ||
87 | unsigned char xmul, xdiv; | ||
88 | unsigned char mul = 0, div = 0; | ||
89 | int tried = 0; | ||
90 | |||
91 | /* Try to calculate multiplier and divider up to 4 times */ | ||
92 | while (((mul == 0) || (div == 0)) && (tried <= 3)) { | ||
93 | for (xdiv = 2; xdiv <= 0x80; xdiv++) | ||
94 | for (xmul = 1; xmul <= 0xfe; xmul++) | ||
95 | if (nforce2_calc_fsb(NFORCE2_PLL(xmul, xdiv)) == | ||
96 | fsb + tried) { | ||
97 | mul = xmul; | ||
98 | div = xdiv; | ||
99 | } | ||
100 | tried++; | ||
101 | } | ||
102 | |||
103 | if ((mul == 0) || (div == 0)) | ||
104 | return -1; | ||
105 | |||
106 | return NFORCE2_PLL(mul, div); | ||
107 | } | ||
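A hypothetical round trip through the two helpers above; the mul/div pair is chosen only for illustration.

/*
 * With the 25 MHz crystal (NFORCE2_XTAL), NFORCE2_PLL(16, 2) encodes
 * mul=16, div=2, and nforce2_calc_fsb() decodes it back to 25 * 16 / 2 =
 * 200 MHz.  nforce2_calc_pll(200) performs the reverse search over mul/div
 * pairs, retrying with fsb+1 .. fsb+3 if no pair hits the requested FSB
 * exactly.
 */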
108 | |||
109 | /** | ||
110 | * nforce2_write_pll - write PLL value to chipset | ||
111 | * @pll: PLL value | ||
112 | * | ||
113 | * Writes new FSB PLL value to chipset | ||
114 | */ | ||
115 | static void nforce2_write_pll(int pll) | ||
116 | { | ||
117 | int temp; | ||
118 | |||
119 | /* Set the pll addr. to 0x00 */ | ||
120 | pci_write_config_dword(nforce2_chipset_dev, NFORCE2_PLLADR, 0); | ||
121 | |||
122 | /* Now write the value in all 64 registers */ | ||
123 | for (temp = 0; temp <= 0x3f; temp++) | ||
124 | pci_write_config_dword(nforce2_chipset_dev, NFORCE2_PLLREG, pll); | ||
125 | |||
126 | return; | ||
127 | } | ||
128 | |||
129 | /** | ||
130 | * nforce2_fsb_read - Read FSB | ||
131 | * | ||
132 | * Read FSB from chipset | ||
133 | * If bootfsb != 0, return FSB at boot-time | ||
134 | */ | ||
135 | static unsigned int nforce2_fsb_read(int bootfsb) | ||
136 | { | ||
137 | struct pci_dev *nforce2_sub5; | ||
138 | u32 fsb, temp = 0; | ||
139 | |||
140 | /* Get chipset boot FSB from subdevice 5 (FSB at boot-time) */ | ||
141 | nforce2_sub5 = pci_get_subsys(PCI_VENDOR_ID_NVIDIA, | ||
142 | 0x01EF,PCI_ANY_ID,PCI_ANY_ID,NULL); | ||
143 | if (!nforce2_sub5) | ||
144 | return 0; | ||
145 | |||
146 | pci_read_config_dword(nforce2_sub5, NFORCE2_BOOTFSB, &fsb); | ||
147 | fsb /= 1000000; | ||
148 | |||
149 | /* Check if PLL register is already set */ | ||
150 | pci_read_config_byte(nforce2_chipset_dev,NFORCE2_PLLENABLE, (u8 *)&temp); | ||
151 | |||
152 | if(bootfsb || !temp) | ||
153 | return fsb; | ||
154 | |||
155 | /* Use PLL register FSB value */ | ||
156 | pci_read_config_dword(nforce2_chipset_dev,NFORCE2_PLLREG, &temp); | ||
157 | fsb = nforce2_calc_fsb(temp); | ||
158 | |||
159 | return fsb; | ||
160 | } | ||
161 | |||
162 | /** | ||
163 | * nforce2_set_fsb - set new FSB | ||
164 | * @fsb: New FSB | ||
165 | * | ||
166 | * Sets new FSB | ||
167 | */ | ||
168 | static int nforce2_set_fsb(unsigned int fsb) | ||
169 | { | ||
170 | u32 temp = 0; | ||
171 | unsigned int tfsb; | ||
172 | int diff; | ||
173 | int pll = 0; | ||
174 | |||
175 | if ((fsb > max_fsb) || (fsb < NFORCE2_MIN_FSB)) { | ||
176 | printk(KERN_ERR "cpufreq: FSB %d is out of range!\n", fsb); | ||
177 | return -EINVAL; | ||
178 | } | ||
179 | |||
180 | tfsb = nforce2_fsb_read(0); | ||
181 | if (!tfsb) { | ||
182 | printk(KERN_ERR "cpufreq: Error while reading the FSB\n"); | ||
183 | return -EINVAL; | ||
184 | } | ||
185 | |||
186 | /* First write? Then set actual value */ | ||
187 | pci_read_config_byte(nforce2_chipset_dev,NFORCE2_PLLENABLE, (u8 *)&temp); | ||
188 | if (!temp) { | ||
189 | pll = nforce2_calc_pll(tfsb); | ||
190 | |||
191 | if (pll < 0) | ||
192 | return -EINVAL; | ||
193 | |||
194 | nforce2_write_pll(pll); | ||
195 | } | ||
196 | |||
197 | /* Enable write access */ | ||
198 | temp = 0x01; | ||
199 | pci_write_config_byte(nforce2_chipset_dev, NFORCE2_PLLENABLE, (u8)temp); | ||
200 | |||
201 | diff = tfsb - fsb; | ||
202 | |||
203 | if (!diff) | ||
204 | return 0; | ||
205 | |||
206 | while ((tfsb != fsb) && (tfsb <= max_fsb) && (tfsb >= min_fsb)) { | ||
207 | if (diff < 0) | ||
208 | tfsb++; | ||
209 | else | ||
210 | tfsb--; | ||
211 | |||
212 | /* Calculate the PLL reg. value */ | ||
213 | if ((pll = nforce2_calc_pll(tfsb)) == -1) | ||
214 | return -EINVAL; | ||
215 | |||
216 | nforce2_write_pll(pll); | ||
217 | #ifdef NFORCE2_DELAY | ||
218 | mdelay(NFORCE2_DELAY); | ||
219 | #endif | ||
220 | } | ||
221 | |||
222 | temp = 0x40; | ||
223 | pci_write_config_byte(nforce2_chipset_dev, NFORCE2_PLLADR, (u8)temp); | ||
224 | |||
225 | return 0; | ||
226 | } | ||
227 | |||
228 | /** | ||
229 | * nforce2_get - get the CPU frequency | ||
230 | * @cpu: CPU number | ||
231 | * | ||
232 | * Returns the CPU frequency | ||
233 | */ | ||
234 | static unsigned int nforce2_get(unsigned int cpu) | ||
235 | { | ||
236 | if (cpu) | ||
237 | return 0; | ||
238 | return nforce2_fsb_read(0) * fid * 100; | ||
239 | } | ||
240 | |||
241 | /** | ||
242 | * nforce2_target - set a new CPUFreq policy | ||
243 | * @policy: new policy | ||
244 | * @target_freq: the target frequency | ||
245 | * @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) | ||
246 | * | ||
247 | * Sets a new CPUFreq policy. | ||
248 | */ | ||
249 | static int nforce2_target(struct cpufreq_policy *policy, | ||
250 | unsigned int target_freq, unsigned int relation) | ||
251 | { | ||
252 | // unsigned long flags; | ||
253 | struct cpufreq_freqs freqs; | ||
254 | unsigned int target_fsb; | ||
255 | |||
256 | if ((target_freq > policy->max) || (target_freq < policy->min)) | ||
257 | return -EINVAL; | ||
258 | |||
259 | target_fsb = target_freq / (fid * 100); | ||
260 | |||
261 | freqs.old = nforce2_get(policy->cpu); | ||
262 | freqs.new = target_fsb * fid * 100; | ||
263 | freqs.cpu = 0; /* Only one CPU on nForce2 platforms */ | ||
264 | |||
265 | if (freqs.old == freqs.new) | ||
266 | return 0; | ||
267 | |||
268 | dprintk("Old CPU frequency %d kHz, new %d kHz\n", | ||
269 | freqs.old, freqs.new); | ||
270 | |||
271 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
272 | |||
273 | /* Disable IRQs */ | ||
274 | //local_irq_save(flags); | ||
275 | |||
276 | if (nforce2_set_fsb(target_fsb) < 0) | ||
277 | printk(KERN_ERR "cpufreq: Changing FSB to %d failed\n", | ||
278 | target_fsb); | ||
279 | else | ||
280 | dprintk("Changed FSB successfully to %d\n", | ||
281 | target_fsb); | ||
282 | |||
283 | /* Enable IRQs */ | ||
284 | //local_irq_restore(flags); | ||
285 | |||
286 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
287 | |||
288 | return 0; | ||
289 | } | ||
290 | |||
291 | /** | ||
292 | * nforce2_verify - verifies a new CPUFreq policy | ||
293 | * @policy: new policy | ||
294 | */ | ||
295 | static int nforce2_verify(struct cpufreq_policy *policy) | ||
296 | { | ||
297 | unsigned int fsb_pol_max; | ||
298 | |||
299 | fsb_pol_max = policy->max / (fid * 100); | ||
300 | |||
301 | if (policy->min < (fsb_pol_max * fid * 100)) | ||
302 | policy->max = (fsb_pol_max + 1) * fid * 100; | ||
303 | |||
304 | cpufreq_verify_within_limits(policy, | ||
305 | policy->cpuinfo.min_freq, | ||
306 | policy->cpuinfo.max_freq); | ||
307 | return 0; | ||
308 | } | ||
309 | |||
310 | static int nforce2_cpu_init(struct cpufreq_policy *policy) | ||
311 | { | ||
312 | unsigned int fsb; | ||
313 | unsigned int rfid; | ||
314 | |||
315 | /* capability check */ | ||
316 | if (policy->cpu != 0) | ||
317 | return -ENODEV; | ||
318 | |||
319 | /* Get current FSB */ | ||
320 | fsb = nforce2_fsb_read(0); | ||
321 | |||
322 | if (!fsb) | ||
323 | return -EIO; | ||
324 | |||
325 | /* FIX: Get FID from CPU */ | ||
326 | if (!fid) { | ||
327 | if (!cpu_khz) { | ||
328 | printk(KERN_WARNING | ||
329 | "cpufreq: cpu_khz not set, can't calculate multiplier!\n"); | ||
330 | return -ENODEV; | ||
331 | } | ||
332 | |||
333 | fid = cpu_khz / (fsb * 100); | ||
334 | rfid = fid % 5; | ||
335 | |||
336 | if (rfid) { | ||
337 | if (rfid > 2) | ||
338 | fid += 5 - rfid; | ||
339 | else | ||
340 | fid -= rfid; | ||
341 | } | ||
342 | } | ||
343 | |||
344 | printk(KERN_INFO "cpufreq: FSB currently at %i MHz, FID %d.%d\n", fsb, | ||
345 | fid / 10, fid % 10); | ||
346 | |||
347 | /* Set maximum FSB to FSB at boot time */ | ||
348 | max_fsb = nforce2_fsb_read(1); | ||
349 | |||
350 | if(!max_fsb) | ||
351 | return -EIO; | ||
352 | |||
353 | if (!min_fsb) | ||
354 | min_fsb = max_fsb - NFORCE2_SAFE_DISTANCE; | ||
355 | |||
356 | if (min_fsb < NFORCE2_MIN_FSB) | ||
357 | min_fsb = NFORCE2_MIN_FSB; | ||
358 | |||
359 | /* cpuinfo and default policy values */ | ||
360 | policy->cpuinfo.min_freq = min_fsb * fid * 100; | ||
361 | policy->cpuinfo.max_freq = max_fsb * fid * 100; | ||
362 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | ||
363 | policy->cur = nforce2_get(policy->cpu); | ||
364 | policy->min = policy->cpuinfo.min_freq; | ||
365 | policy->max = policy->cpuinfo.max_freq; | ||
366 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
367 | |||
368 | return 0; | ||
369 | } | ||
370 | |||
371 | static int nforce2_cpu_exit(struct cpufreq_policy *policy) | ||
372 | { | ||
373 | return 0; | ||
374 | } | ||
375 | |||
376 | static struct cpufreq_driver nforce2_driver = { | ||
377 | .name = "nforce2", | ||
378 | .verify = nforce2_verify, | ||
379 | .target = nforce2_target, | ||
380 | .get = nforce2_get, | ||
381 | .init = nforce2_cpu_init, | ||
382 | .exit = nforce2_cpu_exit, | ||
383 | .owner = THIS_MODULE, | ||
384 | }; | ||
385 | |||
386 | /** | ||
387 | * nforce2_detect_chipset - detect the Southbridge which contains FSB PLL logic | ||
388 | * | ||
389 | * Detects nForce2 A2 and C1 stepping | ||
390 | * | ||
391 | */ | ||
392 | static unsigned int nforce2_detect_chipset(void) | ||
393 | { | ||
394 | nforce2_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_NVIDIA, | ||
395 | PCI_DEVICE_ID_NVIDIA_NFORCE2, | ||
396 | PCI_ANY_ID, PCI_ANY_ID, NULL); | ||
397 | |||
398 | if (nforce2_chipset_dev == NULL) | ||
399 | return -ENODEV; | ||
400 | |||
401 | printk(KERN_INFO "cpufreq: Detected nForce2 chipset revision %X\n", | ||
402 | nforce2_chipset_dev->revision); | ||
403 | printk(KERN_INFO | ||
404 | "cpufreq: FSB changing is maybe unstable and can lead to crashes and data loss.\n"); | ||
405 | |||
406 | return 0; | ||
407 | } | ||
408 | |||
409 | /** | ||
410 | * nforce2_init - initializes the nForce2 CPUFreq driver | ||
411 | * | ||
412 | * Initializes the nForce2 FSB support. Returns -ENODEV on unsupported | ||
413 | * devices, -EINVAL on problems during initialization, and zero on | ||
414 | * success. | ||
415 | */ | ||
416 | static int __init nforce2_init(void) | ||
417 | { | ||
418 | /* TODO: do we need to detect the processor? */ | ||
419 | |||
420 | /* detect chipset */ | ||
421 | if (nforce2_detect_chipset()) { | ||
422 | printk(KERN_ERR "cpufreq: No nForce2 chipset.\n"); | ||
423 | return -ENODEV; | ||
424 | } | ||
425 | |||
426 | return cpufreq_register_driver(&nforce2_driver); | ||
427 | } | ||
428 | |||
429 | /** | ||
430 | * nforce2_exit - unregisters cpufreq module | ||
431 | * | ||
432 | * Unregisters nForce2 FSB change support. | ||
433 | */ | ||
434 | static void __exit nforce2_exit(void) | ||
435 | { | ||
436 | cpufreq_unregister_driver(&nforce2_driver); | ||
437 | } | ||
438 | |||
439 | module_init(nforce2_init); | ||
440 | module_exit(nforce2_exit); | ||
441 | |||
diff --git a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
new file mode 100644
index 000000000000..f43d98e11cc7
--- /dev/null
+++ b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
@@ -0,0 +1,334 @@
1 | /* | ||
2 | * Based on documentation provided by Dave Jones. Thanks! | ||
3 | * | ||
4 | * Licensed under the terms of the GNU GPL License version 2. | ||
5 | * | ||
6 | * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* | ||
7 | */ | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/cpufreq.h> | ||
13 | #include <linux/ioport.h> | ||
14 | #include <linux/slab.h> | ||
15 | |||
16 | #include <asm/msr.h> | ||
17 | #include <asm/tsc.h> | ||
18 | #include <asm/timex.h> | ||
19 | #include <asm/io.h> | ||
20 | #include <asm/delay.h> | ||
21 | |||
22 | #define EPS_BRAND_C7M 0 | ||
23 | #define EPS_BRAND_C7 1 | ||
24 | #define EPS_BRAND_EDEN 2 | ||
25 | #define EPS_BRAND_C3 3 | ||
26 | |||
27 | struct eps_cpu_data { | ||
28 | u32 fsb; | ||
29 | struct cpufreq_frequency_table freq_table[]; | ||
30 | }; | ||
31 | |||
32 | static struct eps_cpu_data *eps_cpu[NR_CPUS]; | ||
33 | |||
34 | |||
35 | static unsigned int eps_get(unsigned int cpu) | ||
36 | { | ||
37 | struct eps_cpu_data *centaur; | ||
38 | u32 lo, hi; | ||
39 | |||
40 | if (cpu) | ||
41 | return 0; | ||
42 | centaur = eps_cpu[cpu]; | ||
43 | if (centaur == NULL) | ||
44 | return 0; | ||
45 | |||
46 | /* Return current frequency */ | ||
47 | rdmsr(MSR_IA32_PERF_STATUS, lo, hi); | ||
48 | return centaur->fsb * ((lo >> 8) & 0xff); | ||
49 | } | ||
50 | |||
51 | static int eps_set_state(struct eps_cpu_data *centaur, | ||
52 | unsigned int cpu, | ||
53 | u32 dest_state) | ||
54 | { | ||
55 | struct cpufreq_freqs freqs; | ||
56 | u32 lo, hi; | ||
57 | int err = 0; | ||
58 | int i; | ||
59 | |||
60 | freqs.old = eps_get(cpu); | ||
61 | freqs.new = centaur->fsb * ((dest_state >> 8) & 0xff); | ||
62 | freqs.cpu = cpu; | ||
63 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
64 | |||
65 | /* Wait while CPU is busy */ | ||
66 | rdmsr(MSR_IA32_PERF_STATUS, lo, hi); | ||
67 | i = 0; | ||
68 | while (lo & ((1 << 16) | (1 << 17))) { | ||
69 | udelay(16); | ||
70 | rdmsr(MSR_IA32_PERF_STATUS, lo, hi); | ||
71 | i++; | ||
72 | if (unlikely(i > 64)) { | ||
73 | err = -ENODEV; | ||
74 | goto postchange; | ||
75 | } | ||
76 | } | ||
77 | /* Set new multiplier and voltage */ | ||
78 | wrmsr(MSR_IA32_PERF_CTL, dest_state & 0xffff, 0); | ||
79 | /* Wait until transition end */ | ||
80 | i = 0; | ||
81 | do { | ||
82 | udelay(16); | ||
83 | rdmsr(MSR_IA32_PERF_STATUS, lo, hi); | ||
84 | i++; | ||
85 | if (unlikely(i > 64)) { | ||
86 | err = -ENODEV; | ||
87 | goto postchange; | ||
88 | } | ||
89 | } while (lo & ((1 << 16) | (1 << 17))); | ||
90 | |||
91 | /* Return current frequency */ | ||
92 | postchange: | ||
93 | rdmsr(MSR_IA32_PERF_STATUS, lo, hi); | ||
94 | freqs.new = centaur->fsb * ((lo >> 8) & 0xff); | ||
95 | |||
96 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
97 | return err; | ||
98 | } | ||
99 | |||
100 | static int eps_target(struct cpufreq_policy *policy, | ||
101 | unsigned int target_freq, | ||
102 | unsigned int relation) | ||
103 | { | ||
104 | struct eps_cpu_data *centaur; | ||
105 | unsigned int newstate = 0; | ||
106 | unsigned int cpu = policy->cpu; | ||
107 | unsigned int dest_state; | ||
108 | int ret; | ||
109 | |||
110 | if (unlikely(eps_cpu[cpu] == NULL)) | ||
111 | return -ENODEV; | ||
112 | centaur = eps_cpu[cpu]; | ||
113 | |||
114 | if (unlikely(cpufreq_frequency_table_target(policy, | ||
115 | &eps_cpu[cpu]->freq_table[0], | ||
116 | target_freq, | ||
117 | relation, | ||
118 | &newstate))) { | ||
119 | return -EINVAL; | ||
120 | } | ||
121 | |||
122 | /* Make frequency transition */ | ||
123 | dest_state = centaur->freq_table[newstate].index & 0xffff; | ||
124 | ret = eps_set_state(centaur, cpu, dest_state); | ||
125 | if (ret) | ||
126 | printk(KERN_ERR "eps: Timeout!\n"); | ||
127 | return ret; | ||
128 | } | ||
129 | |||
130 | static int eps_verify(struct cpufreq_policy *policy) | ||
131 | { | ||
132 | return cpufreq_frequency_table_verify(policy, | ||
133 | &eps_cpu[policy->cpu]->freq_table[0]); | ||
134 | } | ||
135 | |||
136 | static int eps_cpu_init(struct cpufreq_policy *policy) | ||
137 | { | ||
138 | unsigned int i; | ||
139 | u32 lo, hi; | ||
140 | u64 val; | ||
141 | u8 current_multiplier, current_voltage; | ||
142 | u8 max_multiplier, max_voltage; | ||
143 | u8 min_multiplier, min_voltage; | ||
144 | u8 brand; | ||
145 | u32 fsb; | ||
146 | struct eps_cpu_data *centaur; | ||
147 | struct cpufreq_frequency_table *f_table; | ||
148 | int k, step, voltage; | ||
149 | int ret; | ||
150 | int states; | ||
151 | |||
152 | if (policy->cpu != 0) | ||
153 | return -ENODEV; | ||
154 | |||
155 | /* Check brand */ | ||
156 | printk("eps: Detected VIA "); | ||
157 | rdmsr(0x1153, lo, hi); | ||
158 | brand = (((lo >> 2) ^ lo) >> 18) & 3; | ||
159 | switch(brand) { | ||
160 | case EPS_BRAND_C7M: | ||
161 | printk("C7-M\n"); | ||
162 | break; | ||
163 | case EPS_BRAND_C7: | ||
164 | printk("C7\n"); | ||
165 | break; | ||
166 | case EPS_BRAND_EDEN: | ||
167 | printk("Eden\n"); | ||
168 | break; | ||
169 | case EPS_BRAND_C3: | ||
170 | printk("C3\n"); | ||
171 | return -ENODEV; | ||
172 | break; | ||
173 | } | ||
174 | /* Enable Enhanced PowerSaver */ | ||
175 | rdmsrl(MSR_IA32_MISC_ENABLE, val); | ||
176 | if (!(val & 1 << 16)) { | ||
177 | val |= 1 << 16; | ||
178 | wrmsrl(MSR_IA32_MISC_ENABLE, val); | ||
179 | /* Can be locked at 0 */ | ||
180 | rdmsrl(MSR_IA32_MISC_ENABLE, val); | ||
181 | if (!(val & 1 << 16)) { | ||
182 | printk("eps: Can't enable Enhanced PowerSaver\n"); | ||
183 | return -ENODEV; | ||
184 | } | ||
185 | } | ||
186 | |||
187 | /* Print voltage and multiplier */ | ||
188 | rdmsr(MSR_IA32_PERF_STATUS, lo, hi); | ||
189 | current_voltage = lo & 0xff; | ||
190 | printk("eps: Current voltage = %dmV\n", current_voltage * 16 + 700); | ||
191 | current_multiplier = (lo >> 8) & 0xff; | ||
192 | printk("eps: Current multiplier = %d\n", current_multiplier); | ||
193 | |||
194 | /* Print limits */ | ||
195 | max_voltage = hi & 0xff; | ||
196 | printk("eps: Highest voltage = %dmV\n", max_voltage * 16 + 700); | ||
197 | max_multiplier = (hi >> 8) & 0xff; | ||
198 | printk("eps: Highest multiplier = %d\n", max_multiplier); | ||
199 | min_voltage = (hi >> 16) & 0xff; | ||
200 | printk("eps: Lowest voltage = %dmV\n", min_voltage * 16 + 700); | ||
201 | min_multiplier = (hi >> 24) & 0xff; | ||
202 | printk("eps: Lowest multiplier = %d\n", min_multiplier); | ||
203 | |||
204 | /* Sanity checks */ | ||
205 | if (current_multiplier == 0 || max_multiplier == 0 | ||
206 | || min_multiplier == 0) | ||
207 | return -EINVAL; | ||
208 | if (current_multiplier > max_multiplier | ||
209 | || max_multiplier <= min_multiplier) | ||
210 | return -EINVAL; | ||
211 | if (current_voltage > 0x1c || max_voltage > 0x1c) | ||
212 | return -EINVAL; | ||
213 | if (max_voltage < min_voltage) | ||
214 | return -EINVAL; | ||
215 | |||
216 | /* Calc FSB speed */ | ||
217 | fsb = cpu_khz / current_multiplier; | ||
218 | /* Calc number of p-states supported */ | ||
219 | if (brand == EPS_BRAND_C7M) | ||
220 | states = max_multiplier - min_multiplier + 1; | ||
221 | else | ||
222 | states = 2; | ||
223 | |||
224 | /* Allocate private data and frequency table for current cpu */ | ||
225 | centaur = kzalloc(sizeof(struct eps_cpu_data) | ||
226 | + (states + 1) * sizeof(struct cpufreq_frequency_table), | ||
227 | GFP_KERNEL); | ||
228 | if (!centaur) | ||
229 | return -ENOMEM; | ||
230 | eps_cpu[0] = centaur; | ||
231 | |||
232 | /* Copy basic values */ | ||
233 | centaur->fsb = fsb; | ||
234 | |||
235 | /* Fill frequency and MSR value table */ | ||
236 | f_table = ¢aur->freq_table[0]; | ||
237 | if (brand != EPS_BRAND_C7M) { | ||
238 | f_table[0].frequency = fsb * min_multiplier; | ||
239 | f_table[0].index = (min_multiplier << 8) | min_voltage; | ||
240 | f_table[1].frequency = fsb * max_multiplier; | ||
241 | f_table[1].index = (max_multiplier << 8) | max_voltage; | ||
242 | f_table[2].frequency = CPUFREQ_TABLE_END; | ||
243 | } else { | ||
244 | k = 0; | ||
245 | step = ((max_voltage - min_voltage) * 256) | ||
246 | / (max_multiplier - min_multiplier); | ||
247 | for (i = min_multiplier; i <= max_multiplier; i++) { | ||
248 | voltage = (k * step) / 256 + min_voltage; | ||
249 | f_table[k].frequency = fsb * i; | ||
250 | f_table[k].index = (i << 8) | voltage; | ||
251 | k++; | ||
252 | } | ||
253 | f_table[k].frequency = CPUFREQ_TABLE_END; | ||
254 | } | ||
255 | |||
256 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
257 | policy->cpuinfo.transition_latency = 140000; /* 844mV -> 700mV in ns */ | ||
258 | policy->cur = fsb * current_multiplier; | ||
259 | |||
260 | ret = cpufreq_frequency_table_cpuinfo(policy, ¢aur->freq_table[0]); | ||
261 | if (ret) { | ||
262 | kfree(centaur); | ||
263 | return ret; | ||
264 | } | ||
265 | |||
266 | cpufreq_frequency_table_get_attr(¢aur->freq_table[0], policy->cpu); | ||
267 | return 0; | ||
268 | } | ||
269 | |||
270 | static int eps_cpu_exit(struct cpufreq_policy *policy) | ||
271 | { | ||
272 | unsigned int cpu = policy->cpu; | ||
273 | struct eps_cpu_data *centaur; | ||
274 | u32 lo, hi; | ||
275 | |||
276 | if (eps_cpu[cpu] == NULL) | ||
277 | return -ENODEV; | ||
278 | centaur = eps_cpu[cpu]; | ||
279 | |||
280 | /* Get max frequency */ | ||
281 | rdmsr(MSR_IA32_PERF_STATUS, lo, hi); | ||
282 | /* Set max frequency */ | ||
283 | eps_set_state(centaur, cpu, hi & 0xffff); | ||
284 | /* Bye */ | ||
285 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
286 | kfree(eps_cpu[cpu]); | ||
287 | eps_cpu[cpu] = NULL; | ||
288 | return 0; | ||
289 | } | ||
290 | |||
291 | static struct freq_attr* eps_attr[] = { | ||
292 | &cpufreq_freq_attr_scaling_available_freqs, | ||
293 | NULL, | ||
294 | }; | ||
295 | |||
296 | static struct cpufreq_driver eps_driver = { | ||
297 | .verify = eps_verify, | ||
298 | .target = eps_target, | ||
299 | .init = eps_cpu_init, | ||
300 | .exit = eps_cpu_exit, | ||
301 | .get = eps_get, | ||
302 | .name = "e_powersaver", | ||
303 | .owner = THIS_MODULE, | ||
304 | .attr = eps_attr, | ||
305 | }; | ||
306 | |||
307 | static int __init eps_init(void) | ||
308 | { | ||
309 | struct cpuinfo_x86 *c = cpu_data; | ||
310 | |||
311 | /* This driver will work only on Centaur C7 processors with | ||
312 | * Enhanced SpeedStep/PowerSaver registers */ | ||
313 | if (c->x86_vendor != X86_VENDOR_CENTAUR | ||
314 | || c->x86 != 6 || c->x86_model != 10) | ||
315 | return -ENODEV; | ||
316 | if (!cpu_has(c, X86_FEATURE_EST)) | ||
317 | return -ENODEV; | ||
318 | |||
319 | if (cpufreq_register_driver(&eps_driver)) | ||
320 | return -EINVAL; | ||
321 | return 0; | ||
322 | } | ||
323 | |||
324 | static void __exit eps_exit(void) | ||
325 | { | ||
326 | cpufreq_unregister_driver(&eps_driver); | ||
327 | } | ||
328 | |||
329 | MODULE_AUTHOR("Rafał Bilski <rafalbilski@interia.pl>"); | ||
330 | MODULE_DESCRIPTION("Enhanced PowerSaver driver for VIA C7 CPUs."); | ||
331 | MODULE_LICENSE("GPL"); | ||
332 | |||
333 | module_init(eps_init); | ||
334 | module_exit(eps_exit); | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/elanfreq.c b/arch/x86/kernel/cpu/cpufreq/elanfreq.c new file mode 100644 index 000000000000..f317276afa7a --- /dev/null +++ b/arch/x86/kernel/cpu/cpufreq/elanfreq.c | |||
@@ -0,0 +1,309 @@ | |||
1 | /* | ||
2 | * elanfreq: cpufreq driver for the AMD ELAN family | ||
3 | * | ||
4 | * (c) Copyright 2002 Robert Schwebel <r.schwebel@pengutronix.de> | ||
5 | * | ||
6 | * Parts of this code are (c) Sven Geggus <sven@geggus.net> | ||
7 | * | ||
8 | * All Rights Reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public License | ||
12 | * as published by the Free Software Foundation; either version | ||
13 | * 2 of the License, or (at your option) any later version. | ||
14 | * | ||
15 | * 2002-02-13: - initial revision for 2.4.18-pre9 by Robert Schwebel | ||
16 | * | ||
17 | */ | ||
18 | |||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/init.h> | ||
22 | |||
23 | #include <linux/slab.h> | ||
24 | #include <linux/delay.h> | ||
25 | #include <linux/cpufreq.h> | ||
26 | |||
27 | #include <asm/msr.h> | ||
28 | #include <asm/timex.h> | ||
29 | #include <asm/io.h> | ||
30 | |||
31 | #define REG_CSCIR 0x22 /* Chip Setup and Control Index Register */ | ||
32 | #define REG_CSCDR 0x23 /* Chip Setup and Control Data Register */ | ||
33 | |||
34 | /* Module parameter */ | ||
35 | static int max_freq; | ||
36 | |||
37 | struct s_elan_multiplier { | ||
38 | int clock; /* frequency in kHz */ | ||
39 | int val40h; /* PMU Force Mode register */ | ||
40 | int val80h; /* CPU Clock Speed Register */ | ||
41 | }; | ||
42 | |||
43 | /* | ||
44 | * It is important that the frequencies | ||
45 | * are listed in ascending order here! | ||
46 | */ | ||
47 | struct s_elan_multiplier elan_multiplier[] = { | ||
48 | {1000, 0x02, 0x18}, | ||
49 | {2000, 0x02, 0x10}, | ||
50 | {4000, 0x02, 0x08}, | ||
51 | {8000, 0x00, 0x00}, | ||
52 | {16000, 0x00, 0x02}, | ||
53 | {33000, 0x00, 0x04}, | ||
54 | {66000, 0x01, 0x04}, | ||
55 | {99000, 0x01, 0x05} | ||
56 | }; | ||
57 | |||
58 | static struct cpufreq_frequency_table elanfreq_table[] = { | ||
59 | {0, 1000}, | ||
60 | {1, 2000}, | ||
61 | {2, 4000}, | ||
62 | {3, 8000}, | ||
63 | {4, 16000}, | ||
64 | {5, 33000}, | ||
65 | {6, 66000}, | ||
66 | {7, 99000}, | ||
67 | {0, CPUFREQ_TABLE_END}, | ||
68 | }; | ||
69 | |||
70 | |||
71 | /** | ||
72 | * elanfreq_get_cpu_frequency: determine current cpu speed | ||
73 | * | ||
74 | * Finds out at which frequency the CPU of the Elan SOC runs | ||
75 | * at the moment. Frequencies from 1 to 33 MHz are generated | ||
76 | * the normal way, 66 and 99 MHz are called "Hyperspeed Mode" | ||
77 | * and have the rest of the chip running with 33 MHz. | ||
78 | */ | ||
79 | |||
80 | static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu) | ||
81 | { | ||
82 | u8 clockspeed_reg; /* Clock Speed Register */ | ||
83 | |||
84 | local_irq_disable(); | ||
85 | outb_p(0x80,REG_CSCIR); | ||
86 | clockspeed_reg = inb_p(REG_CSCDR); | ||
87 | local_irq_enable(); | ||
88 | |||
89 | if ((clockspeed_reg & 0xE0) == 0xE0) | ||
90 | return 0; | ||
91 | |||
92 | /* Are we in CPU clock multiplied mode (66/99 MHz)? */ | ||
93 | if ((clockspeed_reg & 0xE0) == 0xC0) { | ||
94 | if ((clockspeed_reg & 0x01) == 0) | ||
95 | return 66000; | ||
96 | else | ||
97 | return 99000; | ||
98 | } | ||
99 | |||
100 | /* 33 MHz is not 32 MHz... */ | ||
101 | if ((clockspeed_reg & 0xE0)==0xA0) | ||
102 | return 33000; | ||
103 | |||
104 | return ((1<<((clockspeed_reg & 0xE0) >> 5)) * 1000); | ||
105 | } | ||
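As a worked example of the decoding above: a clock speed register value of 0x40 has top bits 010, so the function returns (1 << 2) * 1000 = 4000 kHz; 0xA0 is the special 33 MHz case and returns 33000 kHz; and 0xC1 selects hyperspeed mode with bit 0 set, giving 99000 kHz.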
106 | |||
107 | |||
108 | /** | ||
109 | * elanfreq_set_cpu_frequency: Change the CPU core frequency | ||
110 | * @cpu: cpu number | ||
111 | * @freq: frequency in kHz | ||
112 | * | ||
113 | * This function takes a frequency value and changes the CPU frequency | ||
114 | * according to this. Note that the frequency has to be checked by | ||
115 | * elanfreq_validatespeed() for correctness! | ||
116 | * | ||
117 | * There is no return value. | ||
118 | */ | ||
119 | |||
120 | static void elanfreq_set_cpu_state (unsigned int state) | ||
121 | { | ||
122 | struct cpufreq_freqs freqs; | ||
123 | |||
124 | freqs.old = elanfreq_get_cpu_frequency(0); | ||
125 | freqs.new = elan_multiplier[state].clock; | ||
126 | freqs.cpu = 0; /* elanfreq.c is UP only driver */ | ||
127 | |||
128 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
129 | |||
130 | printk(KERN_INFO "elanfreq: attempting to set frequency to %i kHz\n", | ||
131 | elan_multiplier[state].clock); | ||
132 | |||
133 | |||
134 | /* | ||
135 | * Access to the Elan's internal registers is indexed via | ||
136 | * 0x22: Chip Setup & Control Register Index Register (CSCI) | ||
137 | * 0x23: Chip Setup & Control Register Data Register (CSCD) | ||
138 | * | ||
139 | */ | ||
140 | |||
141 | /* | ||
142 | * 0x40 is the Power Management Unit's Force Mode Register. | ||
143 | * Bit 6 enables Hyperspeed Mode (66/100 MHz core frequency) | ||
144 | */ | ||
145 | |||
146 | local_irq_disable(); | ||
147 | outb_p(0x40,REG_CSCIR); /* Disable hyperspeed mode */ | ||
148 | outb_p(0x00,REG_CSCDR); | ||
149 | local_irq_enable(); /* wait till internal pipelines and */ | ||
150 | udelay(1000); /* buffers have cleaned up */ | ||
151 | |||
152 | local_irq_disable(); | ||
153 | |||
154 | /* now, set the CPU clock speed register (0x80) */ | ||
155 | outb_p(0x80,REG_CSCIR); | ||
156 | outb_p(elan_multiplier[state].val80h,REG_CSCDR); | ||
157 | |||
158 | /* now, the hyperspeed bit in PMU Force Mode Register (0x40) */ | ||
159 | outb_p(0x40,REG_CSCIR); | ||
160 | outb_p(elan_multiplier[state].val40h,REG_CSCDR); | ||
161 | udelay(10000); | ||
162 | local_irq_enable(); | ||
163 | |||
164 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
165 | }; | ||
166 | |||
167 | |||
168 | /** | ||
169 | * elanfreq_validatespeed: test if frequency range is valid | ||
170 | * @policy: the policy to validate | ||
171 | * | ||
172 | * This function checks if a given frequency range in kHz is valid | ||
173 | * for the hardware supported by the driver. | ||
174 | */ | ||
175 | |||
176 | static int elanfreq_verify (struct cpufreq_policy *policy) | ||
177 | { | ||
178 | return cpufreq_frequency_table_verify(policy, &elanfreq_table[0]); | ||
179 | } | ||
180 | |||
181 | static int elanfreq_target (struct cpufreq_policy *policy, | ||
182 | unsigned int target_freq, | ||
183 | unsigned int relation) | ||
184 | { | ||
185 | unsigned int newstate = 0; | ||
186 | |||
187 | if (cpufreq_frequency_table_target(policy, &elanfreq_table[0], target_freq, relation, &newstate)) | ||
188 | return -EINVAL; | ||
189 | |||
190 | elanfreq_set_cpu_state(newstate); | ||
191 | |||
192 | return 0; | ||
193 | } | ||
194 | |||
195 | |||
196 | /* | ||
197 | * Module init and exit code | ||
198 | */ | ||
199 | |||
200 | static int elanfreq_cpu_init(struct cpufreq_policy *policy) | ||
201 | { | ||
202 | struct cpuinfo_x86 *c = cpu_data; | ||
203 | unsigned int i; | ||
204 | int result; | ||
205 | |||
206 | /* capability check */ | ||
207 | if ((c->x86_vendor != X86_VENDOR_AMD) || | ||
208 | (c->x86 != 4) || (c->x86_model!=10)) | ||
209 | return -ENODEV; | ||
210 | |||
211 | /* max freq */ | ||
212 | if (!max_freq) | ||
213 | max_freq = elanfreq_get_cpu_frequency(0); | ||
214 | |||
215 | /* table init */ | ||
216 | for (i=0; (elanfreq_table[i].frequency != CPUFREQ_TABLE_END); i++) { | ||
217 | if (elanfreq_table[i].frequency > max_freq) | ||
218 | elanfreq_table[i].frequency = CPUFREQ_ENTRY_INVALID; | ||
219 | } | ||
220 | |||
221 | /* cpuinfo and default policy values */ | ||
222 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
223 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | ||
224 | policy->cur = elanfreq_get_cpu_frequency(0); | ||
225 | |||
226 | result = cpufreq_frequency_table_cpuinfo(policy, elanfreq_table); | ||
227 | if (result) | ||
228 | return (result); | ||
229 | |||
230 | cpufreq_frequency_table_get_attr(elanfreq_table, policy->cpu); | ||
231 | return 0; | ||
232 | } | ||
233 | |||
234 | |||
235 | static int elanfreq_cpu_exit(struct cpufreq_policy *policy) | ||
236 | { | ||
237 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
238 | return 0; | ||
239 | } | ||
240 | |||
241 | |||
242 | #ifndef MODULE | ||
243 | /** | ||
244 | * elanfreq_setup - elanfreq command line parameter parsing | ||
245 | * | ||
246 | * elanfreq command line parameter. Use: | ||
247 | * elanfreq=66000 | ||
248 | * to set the maximum CPU frequency to 66 MHz. Note that if | ||
249 | * you do not give this boot parameter, the maximum frequency | ||
250 | * will fall back to the _current_ CPU frequency, which might | ||
251 | * be lower. If you build this as a module, use the | ||
252 | * max_freq module parameter instead. | ||
253 | */ | ||
254 | static int __init elanfreq_setup(char *str) | ||
255 | { | ||
256 | max_freq = simple_strtoul(str, &str, 0); | ||
257 | printk(KERN_WARNING "You're using the deprecated elanfreq command line option. Use elanfreq.max_freq instead, please!\n"); | ||
258 | return 1; | ||
259 | } | ||
260 | __setup("elanfreq=", elanfreq_setup); | ||
261 | #endif | ||
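For example (a usage sketch, not part of the source): with the driver built in, appending elanfreq=66000 to the kernel command line caps the frequency at 66 MHz, while the modular build takes the same value through the max_freq module parameter declared at the bottom of this file, e.g. modprobe elanfreq max_freq=66000.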
262 | |||
263 | |||
264 | static struct freq_attr* elanfreq_attr[] = { | ||
265 | &cpufreq_freq_attr_scaling_available_freqs, | ||
266 | NULL, | ||
267 | }; | ||
268 | |||
269 | |||
270 | static struct cpufreq_driver elanfreq_driver = { | ||
271 | .get = elanfreq_get_cpu_frequency, | ||
272 | .verify = elanfreq_verify, | ||
273 | .target = elanfreq_target, | ||
274 | .init = elanfreq_cpu_init, | ||
275 | .exit = elanfreq_cpu_exit, | ||
276 | .name = "elanfreq", | ||
277 | .owner = THIS_MODULE, | ||
278 | .attr = elanfreq_attr, | ||
279 | }; | ||
280 | |||
281 | |||
282 | static int __init elanfreq_init(void) | ||
283 | { | ||
284 | struct cpuinfo_x86 *c = cpu_data; | ||
285 | |||
286 | /* Test if we have the right hardware */ | ||
287 | if ((c->x86_vendor != X86_VENDOR_AMD) || | ||
288 | (c->x86 != 4) || (c->x86_model!=10)) { | ||
289 | printk(KERN_INFO "elanfreq: error: no Elan processor found!\n"); | ||
290 | return -ENODEV; | ||
291 | } | ||
292 | return cpufreq_register_driver(&elanfreq_driver); | ||
293 | } | ||
294 | |||
295 | |||
296 | static void __exit elanfreq_exit(void) | ||
297 | { | ||
298 | cpufreq_unregister_driver(&elanfreq_driver); | ||
299 | } | ||
300 | |||
301 | |||
302 | module_param (max_freq, int, 0444); | ||
303 | |||
304 | MODULE_LICENSE("GPL"); | ||
305 | MODULE_AUTHOR("Robert Schwebel <r.schwebel@pengutronix.de>, Sven Geggus <sven@geggus.net>"); | ||
306 | MODULE_DESCRIPTION("cpufreq driver for AMD's Elan CPUs"); | ||
307 | |||
308 | module_init(elanfreq_init); | ||
309 | module_exit(elanfreq_exit); | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c b/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c new file mode 100644 index 000000000000..461dabc4e495 --- /dev/null +++ b/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c | |||
@@ -0,0 +1,495 @@ | |||
1 | /* | ||
2 | * Cyrix MediaGX and NatSemi Geode Suspend Modulation | ||
3 | * (C) 2002 Zwane Mwaikambo <zwane@commfireservices.com> | ||
4 | * (C) 2002 Hiroshi Miura <miura@da-cha.org> | ||
5 | * All Rights Reserved | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * version 2 as published by the Free Software Foundation | ||
10 | * | ||
11 | * The author(s) of this software shall not be held liable for damages | ||
12 | * of any nature resulting due to the use of this software. This | ||
13 | * software is provided AS-IS with no warranties. | ||
14 | * | ||
15 | * Theoretical note: | ||
16 | * | ||
17 | * (see Geode(tm) CS5530 manual (rev.4.1) page.56) | ||
18 | * | ||
19 | * CPU frequency control on the NatSemi Geode GX1/GXLV processor and CS55x0 | ||
20 | * is based on Suspend Modulation. | ||
21 | * | ||
22 | * Suspend Modulation works by asserting and de-asserting the SUSP# pin | ||
23 | * to the CPU (GX1/GXLV) for configurable durations. While SUSP# is | ||
24 | * asserted the CPU enters an idle state: the GX1 stops its core clock, | ||
25 | * so power consumption is reduced. | ||
26 | * | ||
27 | * Suspend Modulation's OFF/ON durations are configurable | ||
28 | * with 'Suspend Modulation OFF Count Register' | ||
29 | * and 'Suspend Modulation ON Count Register'. | ||
30 | * These registers are 8-bit counters that represent the number of | ||
31 | * 32us intervals for which the SUSP# pin is asserted (ON) or | ||
32 | * de-asserted (OFF) to the processor. | ||
33 | * | ||
34 | * These counters define a ratio which is the effective frequency | ||
35 | * of operation of the system. | ||
36 | * | ||
37 | * OFF Count | ||
38 | * F_eff = Fgx * ---------------------- | ||
39 | * OFF Count + ON Count | ||
40 | * | ||
41 | * 0 <= On Count, Off Count <= 255 | ||
42 | * | ||
43 | * From these limits, we can get register values | ||
44 | * | ||
45 | * off_duration + on_duration <= MAX_DURATION | ||
46 | * on_duration = off_duration * (stock_freq - freq) / freq | ||
47 | * | ||
48 | * off_duration = (freq * DURATION) / stock_freq | ||
49 | * on_duration = DURATION - off_duration | ||
50 | * | ||
51 | * | ||
52 | *--------------------------------------------------------------------------- | ||
53 | * | ||
54 | * ChangeLog: | ||
55 | * Dec. 12, 2003 Hiroshi Miura <miura@da-cha.org> | ||
56 | * - fix on/off register mistake | ||
57 | * - fix cpu_khz calc when it stops cpu modulation. | ||
58 | * | ||
59 | * Dec. 11, 2002 Hiroshi Miura <miura@da-cha.org> | ||
60 | * - rewrite for Cyrix MediaGX Cx5510/5520 and | ||
61 | * NatSemi Geode Cs5530(A). | ||
62 | * | ||
63 | * Jul. ??, 2002 Zwane Mwaikambo <zwane@commfireservices.com> | ||
64 | * - cs5530_mod patch for 2.4.19-rc1. | ||
65 | * | ||
66 | *--------------------------------------------------------------------------- | ||
67 | * | ||
68 | * Todo | ||
69 | * Test on machines with 5510, 5530, 5530A | ||
70 | */ | ||
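A quick numeric illustration of the relations above (a standalone sketch, not part of the driver): throttling a 200 MHz part to roughly half speed with the default duration of 255 gives OFF = 127, ON = 128 and an effective frequency just under 100 MHz.

#include <stdio.h>

int main(void)
{
	int stock_freq = 200000;	/* kHz, full speed */
	int freq = 100000;		/* kHz, requested */
	int duration = 255;		/* default max_duration */
	int off = (freq * duration) / stock_freq;	/* 127 */
	int on = duration - off;			/* 128 */
	int f_eff = (stock_freq * off) / (off + on);	/* 99607 kHz */

	printf("ON=%d OFF=%d F_eff=%d kHz\n", on, off, f_eff);
	return 0;
}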
71 | |||
72 | /************************************************************************ | ||
73 | * Suspend Modulation - Definitions * | ||
74 | ************************************************************************/ | ||
75 | |||
76 | #include <linux/kernel.h> | ||
77 | #include <linux/module.h> | ||
78 | #include <linux/init.h> | ||
79 | #include <linux/smp.h> | ||
80 | #include <linux/cpufreq.h> | ||
81 | #include <linux/pci.h> | ||
82 | #include <asm/processor-cyrix.h> | ||
83 | #include <asm/errno.h> | ||
84 | |||
85 | /* PCI config registers, all at F0 */ | ||
86 | #define PCI_PMER1 0x80 /* power management enable register 1 */ | ||
87 | #define PCI_PMER2 0x81 /* power management enable register 2 */ | ||
88 | #define PCI_PMER3 0x82 /* power management enable register 3 */ | ||
89 | #define PCI_IRQTC 0x8c /* irq speedup timer counter register:typical 2 to 4ms */ | ||
90 | #define PCI_VIDTC 0x8d /* video speedup timer counter register: typical 50 to 100ms */ | ||
91 | #define PCI_MODOFF 0x94 /* suspend modulation OFF counter register, 1 = 32us */ | ||
92 | #define PCI_MODON 0x95 /* suspend modulation ON counter register */ | ||
93 | #define PCI_SUSCFG 0x96 /* suspend configuration register */ | ||
94 | |||
95 | /* PMER1 bits */ | ||
96 | #define GPM (1<<0) /* global power management */ | ||
97 | #define GIT (1<<1) /* globally enable PM device idle timers */ | ||
98 | #define GTR (1<<2) /* globally enable IO traps */ | ||
99 | #define IRQ_SPDUP (1<<3) /* disable clock throttle during interrupt handling */ | ||
100 | #define VID_SPDUP (1<<4) /* disable clock throttle during vga video handling */ | ||
101 | |||
102 | /* SUSCFG bits */ | ||
103 | #define SUSMOD (1<<0) /* enable/disable suspend modulation */ | ||
104 | /* the bits below are supported only by cs5530 (after rev.1.2)/cs5530A */ | ||
105 | #define SMISPDUP (1<<1) /* select how SMI re-enable suspend modulation: */ | ||
106 | /* IRQTC timer or read SMI speedup disable reg.(F1BAR[08-09h]) */ | ||
107 | #define SUSCFG (1<<2) /* enable powering down a GXLV processor. "Special 3Volt Suspend" mode */ | ||
108 | /* the bits below are supported only by cs5530A */ | ||
109 | #define PWRSVE_ISA (1<<3) /* stop ISA clock */ | ||
110 | #define PWRSVE (1<<4) /* active idle */ | ||
111 | |||
112 | struct gxfreq_params { | ||
113 | u8 on_duration; | ||
114 | u8 off_duration; | ||
115 | u8 pci_suscfg; | ||
116 | u8 pci_pmer1; | ||
117 | u8 pci_pmer2; | ||
118 | struct pci_dev *cs55x0; | ||
119 | }; | ||
120 | |||
121 | static struct gxfreq_params *gx_params; | ||
122 | static int stock_freq; | ||
123 | |||
124 | /* PCI bus clock - defaults to 30.000 if cpu_khz is not available */ | ||
125 | static int pci_busclk = 0; | ||
126 | module_param (pci_busclk, int, 0444); | ||
127 | |||
128 | /* maximum duration for which the cpu may be suspended | ||
129 | * (32us * MAX_DURATION). If no parameter is given, this defaults | ||
130 | * to 255. | ||
131 | * Note that this leads to a maximum of 8 ms(!) where the CPU clock | ||
132 | * is suspended -- processing power is just 0.39% of what it used to be, | ||
133 | * though. 781.25 kHz(!) for a 200 MHz processor -- wow. */ | ||
134 | static int max_duration = 255; | ||
135 | module_param (max_duration, int, 0444); | ||
136 | |||
137 | /* For the default policy, we want at least some processing power | ||
138 | * - let's say 5%. (min = maxfreq / POLICY_MIN_DIV) | ||
139 | */ | ||
140 | #define POLICY_MIN_DIV 20 | ||
141 | |||
142 | |||
143 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "gx-suspmod", msg) | ||
144 | |||
145 | /** | ||
146 | * we can detect a core multiplier from dir0_lsb | ||
147 | * from GX1 datasheet p.56, | ||
148 | * MULT[3:0]: | ||
149 | * 0000 = SYSCLK multiplied by 4 (test only) | ||
150 | * 0001 = SYSCLK multiplied by 10 | ||
151 | * 0010 = SYSCLK multiplied by 4 | ||
152 | * 0011 = SYSCLK multiplied by 6 | ||
153 | * 0100 = SYSCLK multiplied by 9 | ||
154 | * 0101 = SYSCLK multiplied by 5 | ||
155 | * 0110 = SYSCLK multiplied by 7 | ||
156 | * 0111 = SYSCLK multiplied by 8 | ||
157 | * of 33.3MHz | ||
158 | **/ | ||
159 | static int gx_freq_mult[16] = { | ||
160 | 4, 10, 4, 6, 9, 5, 7, 8, | ||
161 | 0, 0, 0, 0, 0, 0, 0, 0 | ||
162 | }; | ||
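For example, if neither pci_busclk nor cpu_khz is available, cpufreq_gx_cpu_init() below falls back to a 30000 kHz bus clock, so a DIR1 low nibble of 0x7 (multiplier 8) yields maxfreq = 30000 * 8 = 240000 kHz.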
163 | |||
164 | |||
165 | /**************************************************************** | ||
166 | * Low Level chipset interface * | ||
167 | ****************************************************************/ | ||
168 | static struct pci_device_id gx_chipset_tbl[] __initdata = { | ||
169 | { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, PCI_ANY_ID, PCI_ANY_ID }, | ||
170 | { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520, PCI_ANY_ID, PCI_ANY_ID }, | ||
171 | { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510, PCI_ANY_ID, PCI_ANY_ID }, | ||
172 | { 0, }, | ||
173 | }; | ||
174 | |||
175 | /** | ||
176 | * gx_detect_chipset: | ||
177 | * | ||
178 | **/ | ||
179 | static __init struct pci_dev *gx_detect_chipset(void) | ||
180 | { | ||
181 | struct pci_dev *gx_pci = NULL; | ||
182 | |||
183 | /* check if CPU is a MediaGX or a Geode. */ | ||
184 | if ((current_cpu_data.x86_vendor != X86_VENDOR_NSC) && | ||
185 | (current_cpu_data.x86_vendor != X86_VENDOR_CYRIX)) { | ||
186 | dprintk("error: no MediaGX/Geode processor found!\n"); | ||
187 | return NULL; | ||
188 | } | ||
189 | |||
190 | /* detect which companion chip is used */ | ||
191 | while ((gx_pci = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, gx_pci)) != NULL) { | ||
192 | if ((pci_match_id(gx_chipset_tbl, gx_pci)) != NULL) | ||
193 | return gx_pci; | ||
194 | } | ||
195 | |||
196 | dprintk("error: no supported chipset found!\n"); | ||
197 | return NULL; | ||
198 | } | ||
199 | |||
200 | /** | ||
201 | * gx_get_cpuspeed: | ||
202 | * | ||
203 | * Finds out the effective frequency at which the Cyrix MediaGX/NatSemi Geode CPU runs. | ||
204 | */ | ||
205 | static unsigned int gx_get_cpuspeed(unsigned int cpu) | ||
206 | { | ||
207 | if ((gx_params->pci_suscfg & SUSMOD) == 0) | ||
208 | return stock_freq; | ||
209 | |||
210 | return (stock_freq * gx_params->off_duration) | ||
211 | / (gx_params->on_duration + gx_params->off_duration); | ||
212 | } | ||
213 | |||
214 | /** | ||
215 | * gx_validate_speed: | ||
216 | * find the closest achievable speed and the matching on/off durations | ||
217 | * | ||
218 | **/ | ||
219 | |||
220 | static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration, u8 *off_duration) | ||
221 | { | ||
222 | unsigned int i; | ||
223 | u8 tmp_on, tmp_off; | ||
224 | int old_tmp_freq = stock_freq; | ||
225 | int tmp_freq; | ||
226 | |||
227 | *off_duration=1; | ||
228 | *on_duration=0; | ||
229 | |||
230 | for (i=max_duration; i>0; i--) { | ||
231 | tmp_off = ((khz * i) / stock_freq) & 0xff; | ||
232 | tmp_on = i - tmp_off; | ||
233 | tmp_freq = (stock_freq * tmp_off) / i; | ||
234 | /* if this relation is closer to khz, use this. If it's equal, | ||
235 | * prefer it, too - lower latency */ | ||
236 | if (abs(tmp_freq - khz) <= abs(old_tmp_freq - khz)) { | ||
237 | *on_duration = tmp_on; | ||
238 | *off_duration = tmp_off; | ||
239 | old_tmp_freq = tmp_freq; | ||
240 | } | ||
241 | } | ||
242 | |||
243 | return old_tmp_freq; | ||
244 | } | ||
245 | |||
246 | |||
247 | /** | ||
248 | * gx_set_cpuspeed: | ||
249 | * set cpu speed in khz. | ||
250 | **/ | ||
251 | |||
252 | static void gx_set_cpuspeed(unsigned int khz) | ||
253 | { | ||
254 | u8 suscfg, pmer1; | ||
255 | unsigned int new_khz; | ||
256 | unsigned long flags; | ||
257 | struct cpufreq_freqs freqs; | ||
258 | |||
259 | freqs.cpu = 0; | ||
260 | freqs.old = gx_get_cpuspeed(0); | ||
261 | |||
262 | new_khz = gx_validate_speed(khz, &gx_params->on_duration, &gx_params->off_duration); | ||
263 | |||
264 | freqs.new = new_khz; | ||
265 | |||
266 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
267 | local_irq_save(flags); | ||
268 | |||
269 | if (new_khz != stock_freq) { /* if new khz == 100% of CPU speed, it is special case */ | ||
270 | switch (gx_params->cs55x0->device) { | ||
271 | case PCI_DEVICE_ID_CYRIX_5530_LEGACY: | ||
272 | pmer1 = gx_params->pci_pmer1 | IRQ_SPDUP | VID_SPDUP; | ||
273 | /* FIXME: need to test other values -- Zwane,Miura */ | ||
274 | pci_write_config_byte(gx_params->cs55x0, PCI_IRQTC, 4); /* typical 2 to 4ms */ | ||
275 | pci_write_config_byte(gx_params->cs55x0, PCI_VIDTC, 100);/* typical 50 to 100ms */ | ||
276 | pci_write_config_byte(gx_params->cs55x0, PCI_PMER1, pmer1); | ||
277 | |||
278 | if (gx_params->cs55x0->revision < 0x10) { /* CS5530(rev 1.2, 1.3) */ | ||
279 | suscfg = gx_params->pci_suscfg | SUSMOD; | ||
280 | } else { /* CS5530A,B.. */ | ||
281 | suscfg = gx_params->pci_suscfg | SUSMOD | PWRSVE; | ||
282 | } | ||
283 | break; | ||
284 | case PCI_DEVICE_ID_CYRIX_5520: | ||
285 | case PCI_DEVICE_ID_CYRIX_5510: | ||
286 | suscfg = gx_params->pci_suscfg | SUSMOD; | ||
287 | break; | ||
288 | default: | ||
289 | local_irq_restore(flags); | ||
290 | dprintk("fatal: try to set unknown chipset.\n"); | ||
291 | return; | ||
292 | } | ||
293 | } else { | ||
294 | suscfg = gx_params->pci_suscfg & ~(SUSMOD); | ||
295 | gx_params->off_duration = 0; | ||
296 | gx_params->on_duration = 0; | ||
297 | dprintk("suspend modulation disabled: cpu runs 100 percent speed.\n"); | ||
298 | } | ||
299 | |||
300 | pci_write_config_byte(gx_params->cs55x0, PCI_MODOFF, gx_params->off_duration); | ||
301 | pci_write_config_byte(gx_params->cs55x0, PCI_MODON, gx_params->on_duration); | ||
302 | |||
303 | pci_write_config_byte(gx_params->cs55x0, PCI_SUSCFG, suscfg); | ||
304 | pci_read_config_byte(gx_params->cs55x0, PCI_SUSCFG, &suscfg); | ||
305 | |||
306 | local_irq_restore(flags); | ||
307 | |||
308 | gx_params->pci_suscfg = suscfg; | ||
309 | |||
310 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
311 | |||
312 | dprintk("suspend modulation w/ duration of ON:%d us, OFF:%d us\n", | ||
313 | gx_params->on_duration * 32, gx_params->off_duration * 32); | ||
314 | dprintk("suspend modulation w/ clock speed: %d kHz.\n", freqs.new); | ||
315 | } | ||
316 | |||
317 | /**************************************************************** | ||
318 | * High level functions * | ||
319 | ****************************************************************/ | ||
320 | |||
321 | /* | ||
322 | * cpufreq_gx_verify: test if frequency range is valid | ||
323 | * | ||
324 | * This function checks if a given frequency range in kHz is valid | ||
325 | * for the hardware supported by the driver. | ||
326 | */ | ||
327 | |||
328 | static int cpufreq_gx_verify(struct cpufreq_policy *policy) | ||
329 | { | ||
330 | unsigned int tmp_freq = 0; | ||
331 | u8 tmp1, tmp2; | ||
332 | |||
333 | if (!stock_freq || !policy) | ||
334 | return -EINVAL; | ||
335 | |||
336 | policy->cpu = 0; | ||
337 | cpufreq_verify_within_limits(policy, (stock_freq / max_duration), stock_freq); | ||
338 | |||
339 | /* it needs to be assured that at least one supported frequency is | ||
340 | * within policy->min and policy->max. If it is not, policy->max | ||
341 | * needs to be increased until one frequency is supported. | ||
342 | * policy->min may not be decreased, though. This way we guarantee a | ||
343 | * specific processing capacity. | ||
344 | */ | ||
345 | tmp_freq = gx_validate_speed(policy->min, &tmp1, &tmp2); | ||
346 | if (tmp_freq < policy->min) | ||
347 | tmp_freq += stock_freq / max_duration; | ||
348 | policy->min = tmp_freq; | ||
349 | if (policy->min > policy->max) | ||
350 | policy->max = tmp_freq; | ||
351 | tmp_freq = gx_validate_speed(policy->max, &tmp1, &tmp2); | ||
352 | if (tmp_freq > policy->max) | ||
353 | tmp_freq -= stock_freq / max_duration; | ||
354 | policy->max = tmp_freq; | ||
355 | if (policy->max < policy->min) | ||
356 | policy->max = policy->min; | ||
357 | cpufreq_verify_within_limits(policy, (stock_freq / max_duration), stock_freq); | ||
358 | |||
359 | return 0; | ||
360 | } | ||
361 | |||
362 | /* | ||
363 | * cpufreq_gx_target: | ||
364 | * | ||
365 | */ | ||
366 | static int cpufreq_gx_target(struct cpufreq_policy *policy, | ||
367 | unsigned int target_freq, | ||
368 | unsigned int relation) | ||
369 | { | ||
370 | u8 tmp1, tmp2; | ||
371 | unsigned int tmp_freq; | ||
372 | |||
373 | if (!stock_freq || !policy) | ||
374 | return -EINVAL; | ||
375 | |||
376 | policy->cpu = 0; | ||
377 | |||
378 | tmp_freq = gx_validate_speed(target_freq, &tmp1, &tmp2); | ||
379 | while (tmp_freq < policy->min) { | ||
380 | tmp_freq += stock_freq / max_duration; | ||
381 | tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2); | ||
382 | } | ||
383 | while (tmp_freq > policy->max) { | ||
384 | tmp_freq -= stock_freq / max_duration; | ||
385 | tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2); | ||
386 | } | ||
387 | |||
388 | gx_set_cpuspeed(tmp_freq); | ||
389 | |||
390 | return 0; | ||
391 | } | ||
392 | |||
393 | static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) | ||
394 | { | ||
395 | unsigned int maxfreq, curfreq; | ||
396 | |||
397 | if (!policy || policy->cpu != 0) | ||
398 | return -ENODEV; | ||
399 | |||
400 | /* determine maximum frequency */ | ||
401 | if (pci_busclk) { | ||
402 | maxfreq = pci_busclk * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f]; | ||
403 | } else if (cpu_khz) { | ||
404 | maxfreq = cpu_khz; | ||
405 | } else { | ||
406 | maxfreq = 30000 * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f]; | ||
407 | } | ||
408 | stock_freq = maxfreq; | ||
409 | curfreq = gx_get_cpuspeed(0); | ||
410 | |||
411 | dprintk("cpu max frequency is %d.\n", maxfreq); | ||
412 | dprintk("cpu current frequency is %dkHz.\n",curfreq); | ||
413 | |||
414 | /* setup basic struct for cpufreq API */ | ||
415 | policy->cpu = 0; | ||
416 | |||
417 | if (max_duration < POLICY_MIN_DIV) | ||
418 | policy->min = maxfreq / max_duration; | ||
419 | else | ||
420 | policy->min = maxfreq / POLICY_MIN_DIV; | ||
421 | policy->max = maxfreq; | ||
422 | policy->cur = curfreq; | ||
423 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
424 | policy->cpuinfo.min_freq = maxfreq / max_duration; | ||
425 | policy->cpuinfo.max_freq = maxfreq; | ||
426 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | ||
427 | |||
428 | return 0; | ||
429 | } | ||
430 | |||
431 | /* | ||
432 | * cpufreq_gx_init: | ||
433 | * MediaGX/Geode GX initialize cpufreq driver | ||
434 | */ | ||
435 | static struct cpufreq_driver gx_suspmod_driver = { | ||
436 | .get = gx_get_cpuspeed, | ||
437 | .verify = cpufreq_gx_verify, | ||
438 | .target = cpufreq_gx_target, | ||
439 | .init = cpufreq_gx_cpu_init, | ||
440 | .name = "gx-suspmod", | ||
441 | .owner = THIS_MODULE, | ||
442 | }; | ||
443 | |||
444 | static int __init cpufreq_gx_init(void) | ||
445 | { | ||
446 | int ret; | ||
447 | struct gxfreq_params *params; | ||
448 | struct pci_dev *gx_pci; | ||
449 | |||
450 | /* Test if we have the right hardware */ | ||
451 | if ((gx_pci = gx_detect_chipset()) == NULL) | ||
452 | return -ENODEV; | ||
453 | |||
454 | /* check whether module parameters are sane */ | ||
455 | if (max_duration > 0xff) | ||
456 | max_duration = 0xff; | ||
457 | |||
458 | dprintk("geode suspend modulation available.\n"); | ||
459 | |||
460 | params = kzalloc(sizeof(struct gxfreq_params), GFP_KERNEL); | ||
461 | if (params == NULL) | ||
462 | return -ENOMEM; | ||
463 | |||
464 | params->cs55x0 = gx_pci; | ||
465 | gx_params = params; | ||
466 | |||
467 | /* keep cs55x0 configurations */ | ||
468 | pci_read_config_byte(params->cs55x0, PCI_SUSCFG, &(params->pci_suscfg)); | ||
469 | pci_read_config_byte(params->cs55x0, PCI_PMER1, &(params->pci_pmer1)); | ||
470 | pci_read_config_byte(params->cs55x0, PCI_PMER2, &(params->pci_pmer2)); | ||
471 | pci_read_config_byte(params->cs55x0, PCI_MODON, &(params->on_duration)); | ||
472 | pci_read_config_byte(params->cs55x0, PCI_MODOFF, &(params->off_duration)); | ||
473 | |||
474 | if ((ret = cpufreq_register_driver(&gx_suspmod_driver))) { | ||
475 | kfree(params); | ||
476 | return ret; /* register error! */ | ||
477 | } | ||
478 | |||
479 | return 0; | ||
480 | } | ||
481 | |||
482 | static void __exit cpufreq_gx_exit(void) | ||
483 | { | ||
484 | cpufreq_unregister_driver(&gx_suspmod_driver); | ||
485 | pci_dev_put(gx_params->cs55x0); | ||
486 | kfree(gx_params); | ||
487 | } | ||
488 | |||
489 | MODULE_AUTHOR ("Hiroshi Miura <miura@da-cha.org>"); | ||
490 | MODULE_DESCRIPTION ("Cpufreq driver for Cyrix MediaGX and NatSemi Geode"); | ||
491 | MODULE_LICENSE ("GPL"); | ||
492 | |||
493 | module_init(cpufreq_gx_init); | ||
494 | module_exit(cpufreq_gx_exit); | ||
495 | |||
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c new file mode 100644 index 000000000000..f0cce3c2dc3a --- /dev/null +++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c | |||
@@ -0,0 +1,1024 @@ | |||
1 | /* | ||
2 | * (C) 2001-2004 Dave Jones. <davej@codemonkey.org.uk> | ||
3 | * (C) 2002 Padraig Brady. <padraig@antefacto.com> | ||
4 | * | ||
5 | * Licensed under the terms of the GNU GPL License version 2. | ||
6 | * Based upon datasheets & sample CPUs kindly provided by VIA. | ||
7 | * | ||
8 | * VIA currently have 3 different versions of Longhaul. | ||
9 | * Version 1 (Longhaul) uses the BCR2 MSR at 0x1147. | ||
10 | * It is present only in Samuel 1 (C5A), Samuel 2 (C5B) stepping 0. | ||
11 | * Version 2 of longhaul is backward compatible with v1, but adds | ||
12 | * LONGHAUL MSR for purpose of both frequency and voltage scaling. | ||
13 | * Present in Samuel 2 (steppings 1-7 only) (C5B), and Ezra (C5C). | ||
14 | * Version 3 of longhaul got renamed to Powersaver and redesigned | ||
15 | * to use only the POWERSAVER MSR at 0x110a. | ||
16 | * It is present in Ezra-T (C5M), Nehemiah (C5X) and above. | ||
17 | * It's pretty much the same, feature-wise, as longhaul v2, though | ||
18 | * there is provision for scaling the FSB too; this doesn't work | ||
19 | * well in practice, so we don't even try to use it. | ||
20 | * | ||
21 | * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* | ||
22 | */ | ||
23 | |||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/moduleparam.h> | ||
27 | #include <linux/init.h> | ||
28 | #include <linux/cpufreq.h> | ||
29 | #include <linux/pci.h> | ||
30 | #include <linux/slab.h> | ||
31 | #include <linux/string.h> | ||
32 | #include <linux/delay.h> | ||
33 | |||
34 | #include <asm/msr.h> | ||
35 | #include <asm/timex.h> | ||
36 | #include <asm/io.h> | ||
37 | #include <asm/acpi.h> | ||
38 | #include <linux/acpi.h> | ||
39 | #include <acpi/processor.h> | ||
40 | |||
41 | #include "longhaul.h" | ||
42 | |||
43 | #define PFX "longhaul: " | ||
44 | |||
45 | #define TYPE_LONGHAUL_V1 1 | ||
46 | #define TYPE_LONGHAUL_V2 2 | ||
47 | #define TYPE_POWERSAVER 3 | ||
48 | |||
49 | #define CPU_SAMUEL 1 | ||
50 | #define CPU_SAMUEL2 2 | ||
51 | #define CPU_EZRA 3 | ||
52 | #define CPU_EZRA_T 4 | ||
53 | #define CPU_NEHEMIAH 5 | ||
54 | #define CPU_NEHEMIAH_C 6 | ||
55 | |||
56 | /* Flags */ | ||
57 | #define USE_ACPI_C3 (1 << 1) | ||
58 | #define USE_NORTHBRIDGE (1 << 2) | ||
59 | |||
60 | static int cpu_model; | ||
61 | static unsigned int numscales=16; | ||
62 | static unsigned int fsb; | ||
63 | |||
64 | static const struct mV_pos *vrm_mV_table; | ||
65 | static const unsigned char *mV_vrm_table; | ||
66 | |||
67 | static unsigned int highest_speed, lowest_speed; /* kHz */ | ||
68 | static unsigned int minmult, maxmult; | ||
69 | static int can_scale_voltage; | ||
70 | static struct acpi_processor *pr = NULL; | ||
71 | static struct acpi_processor_cx *cx = NULL; | ||
72 | static u32 acpi_regs_addr; | ||
73 | static u8 longhaul_flags; | ||
74 | static unsigned int longhaul_index; | ||
75 | |||
76 | /* Module parameters */ | ||
77 | static int scale_voltage; | ||
78 | static int disable_acpi_c3; | ||
79 | static int revid_errata; | ||
80 | |||
81 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "longhaul", msg) | ||
82 | |||
83 | |||
84 | /* Clock ratios multiplied by 10 */ | ||
85 | static int clock_ratio[32]; | ||
86 | static int eblcr_table[32]; | ||
87 | static int longhaul_version; | ||
88 | static struct cpufreq_frequency_table *longhaul_table; | ||
89 | |||
90 | #ifdef CONFIG_CPU_FREQ_DEBUG | ||
91 | static char speedbuffer[8]; | ||
92 | |||
93 | static char *print_speed(int speed) | ||
94 | { | ||
95 | if (speed < 1000) { | ||
96 | snprintf(speedbuffer, sizeof(speedbuffer),"%dMHz", speed); | ||
97 | return speedbuffer; | ||
98 | } | ||
99 | |||
100 | if (speed%1000 == 0) | ||
101 | snprintf(speedbuffer, sizeof(speedbuffer), | ||
102 | "%dGHz", speed/1000); | ||
103 | else | ||
104 | snprintf(speedbuffer, sizeof(speedbuffer), | ||
105 | "%d.%dGHz", speed/1000, (speed%1000)/100); | ||
106 | |||
107 | return speedbuffer; | ||
108 | } | ||
109 | #endif | ||
110 | |||
111 | |||
112 | static unsigned int calc_speed(int mult) | ||
113 | { | ||
114 | int khz; | ||
115 | khz = (mult/10)*fsb; | ||
116 | if (mult%10) | ||
117 | khz += fsb/2; | ||
118 | khz *= 1000; | ||
119 | return khz; | ||
120 | } | ||
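Worked example: clock ratios are stored times 10, so mult = 95 (a 9.5x ratio) with fsb = 133 gives (95 / 10) * 133 = 1197, plus fsb / 2 = 66 for the half step, i.e. 1263, and finally 1263 * 1000 = 1263000 kHz.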
121 | |||
122 | |||
123 | static int longhaul_get_cpu_mult(void) | ||
124 | { | ||
125 | unsigned long invalue=0,lo, hi; | ||
126 | |||
127 | rdmsr (MSR_IA32_EBL_CR_POWERON, lo, hi); | ||
128 | invalue = (lo & (1<<22|1<<23|1<<24|1<<25)) >>22; | ||
129 | if (longhaul_version==TYPE_LONGHAUL_V2 || longhaul_version==TYPE_POWERSAVER) { | ||
130 | if (lo & (1<<27)) | ||
131 | invalue+=16; | ||
132 | } | ||
133 | return eblcr_table[invalue]; | ||
134 | } | ||
135 | |||
136 | /* For processor with BCR2 MSR */ | ||
137 | |||
138 | static void do_longhaul1(unsigned int clock_ratio_index) | ||
139 | { | ||
140 | union msr_bcr2 bcr2; | ||
141 | |||
142 | rdmsrl(MSR_VIA_BCR2, bcr2.val); | ||
143 | /* Enable software clock multiplier */ | ||
144 | bcr2.bits.ESOFTBF = 1; | ||
145 | bcr2.bits.CLOCKMUL = clock_ratio_index & 0xff; | ||
146 | |||
147 | /* Sync to timer tick */ | ||
148 | safe_halt(); | ||
149 | /* Change frequency on next halt or sleep */ | ||
150 | wrmsrl(MSR_VIA_BCR2, bcr2.val); | ||
151 | /* Invoke transition */ | ||
152 | ACPI_FLUSH_CPU_CACHE(); | ||
153 | halt(); | ||
154 | |||
155 | /* Disable software clock multiplier */ | ||
156 | local_irq_disable(); | ||
157 | rdmsrl(MSR_VIA_BCR2, bcr2.val); | ||
158 | bcr2.bits.ESOFTBF = 0; | ||
159 | wrmsrl(MSR_VIA_BCR2, bcr2.val); | ||
160 | } | ||
161 | |||
162 | /* For processor with Longhaul MSR */ | ||
163 | |||
164 | static void do_powersaver(int cx_address, unsigned int clock_ratio_index, | ||
165 | unsigned int dir) | ||
166 | { | ||
167 | union msr_longhaul longhaul; | ||
168 | u32 t; | ||
169 | |||
170 | rdmsrl(MSR_VIA_LONGHAUL, longhaul.val); | ||
171 | /* Setup new frequency */ | ||
172 | if (!revid_errata) | ||
173 | longhaul.bits.RevisionKey = longhaul.bits.RevisionID; | ||
174 | else | ||
175 | longhaul.bits.RevisionKey = 0; | ||
176 | longhaul.bits.SoftBusRatio = clock_ratio_index & 0xf; | ||
177 | longhaul.bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4; | ||
178 | /* Setup new voltage */ | ||
179 | if (can_scale_voltage) | ||
180 | longhaul.bits.SoftVID = (clock_ratio_index >> 8) & 0x1f; | ||
181 | /* Sync to timer tick */ | ||
182 | safe_halt(); | ||
183 | /* Raise voltage if necessary */ | ||
184 | if (can_scale_voltage && dir) { | ||
185 | longhaul.bits.EnableSoftVID = 1; | ||
186 | wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); | ||
187 | /* Change voltage */ | ||
188 | if (!cx_address) { | ||
189 | ACPI_FLUSH_CPU_CACHE(); | ||
190 | halt(); | ||
191 | } else { | ||
192 | ACPI_FLUSH_CPU_CACHE(); | ||
193 | /* Invoke C3 */ | ||
194 | inb(cx_address); | ||
195 | /* Dummy op - must do something useless after P_LVL3 | ||
196 | * read */ | ||
197 | t = inl(acpi_gbl_FADT.xpm_timer_block.address); | ||
198 | } | ||
199 | longhaul.bits.EnableSoftVID = 0; | ||
200 | wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); | ||
201 | } | ||
202 | |||
203 | /* Change frequency on next halt or sleep */ | ||
204 | longhaul.bits.EnableSoftBusRatio = 1; | ||
205 | wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); | ||
206 | if (!cx_address) { | ||
207 | ACPI_FLUSH_CPU_CACHE(); | ||
208 | halt(); | ||
209 | } else { | ||
210 | ACPI_FLUSH_CPU_CACHE(); | ||
211 | /* Invoke C3 */ | ||
212 | inb(cx_address); | ||
213 | /* Dummy op - must do something useless after P_LVL3 read */ | ||
214 | t = inl(acpi_gbl_FADT.xpm_timer_block.address); | ||
215 | } | ||
216 | /* Disable bus ratio bit */ | ||
217 | longhaul.bits.EnableSoftBusRatio = 0; | ||
218 | wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); | ||
219 | |||
220 | /* Reduce voltage if necessary */ | ||
221 | if (can_scale_voltage && !dir) { | ||
222 | longhaul.bits.EnableSoftVID = 1; | ||
223 | wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); | ||
224 | /* Change voltage */ | ||
225 | if (!cx_address) { | ||
226 | ACPI_FLUSH_CPU_CACHE(); | ||
227 | halt(); | ||
228 | } else { | ||
229 | ACPI_FLUSH_CPU_CACHE(); | ||
230 | /* Invoke C3 */ | ||
231 | inb(cx_address); | ||
232 | /* Dummy op - must do something useless after P_LVL3 | ||
233 | * read */ | ||
234 | t = inl(acpi_gbl_FADT.xpm_timer_block.address); | ||
235 | } | ||
236 | longhaul.bits.EnableSoftVID = 0; | ||
237 | wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); | ||
238 | } | ||
239 | } | ||
240 | |||
241 | /** | ||
242 | * longhaul_setstate() | ||
243 | * @table_index : index (in longhaul_table) of the state to switch to. | ||
244 | * | ||
245 | * Sets a new clock ratio. | ||
246 | */ | ||
247 | |||
248 | static void longhaul_setstate(unsigned int table_index) | ||
249 | { | ||
250 | unsigned int clock_ratio_index; | ||
251 | int speed, mult; | ||
252 | struct cpufreq_freqs freqs; | ||
253 | unsigned long flags; | ||
254 | unsigned int pic1_mask, pic2_mask; | ||
255 | u16 bm_status = 0; | ||
256 | u32 bm_timeout = 1000; | ||
257 | unsigned int dir = 0; | ||
258 | |||
259 | clock_ratio_index = longhaul_table[table_index].index; | ||
260 | /* Safety precautions */ | ||
261 | mult = clock_ratio[clock_ratio_index & 0x1f]; | ||
262 | if (mult == -1) | ||
263 | return; | ||
264 | speed = calc_speed(mult); | ||
265 | if ((speed > highest_speed) || (speed < lowest_speed)) | ||
266 | return; | ||
267 | /* Voltage transition before frequency transition? */ | ||
268 | if (can_scale_voltage && longhaul_index < table_index) | ||
269 | dir = 1; | ||
270 | |||
271 | freqs.old = calc_speed(longhaul_get_cpu_mult()); | ||
272 | freqs.new = speed; | ||
273 | freqs.cpu = 0; /* longhaul.c is UP only driver */ | ||
274 | |||
275 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
276 | |||
277 | dprintk ("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", | ||
278 | fsb, mult/10, mult%10, print_speed(speed/1000)); | ||
279 | retry_loop: | ||
280 | preempt_disable(); | ||
281 | local_irq_save(flags); | ||
282 | |||
283 | pic2_mask = inb(0xA1); | ||
284 | pic1_mask = inb(0x21); /* works on C3. save mask. */ | ||
285 | outb(0xFF,0xA1); /* Overkill */ | ||
286 | outb(0xFE,0x21); /* TMR0 only */ | ||
287 | |||
288 | /* Wait while PCI bus is busy. */ | ||
289 | if (acpi_regs_addr && (longhaul_flags & USE_NORTHBRIDGE | ||
290 | || ((pr != NULL) && pr->flags.bm_control))) { | ||
291 | bm_status = inw(acpi_regs_addr); | ||
292 | bm_status &= 1 << 4; | ||
293 | while (bm_status && bm_timeout) { | ||
294 | outw(1 << 4, acpi_regs_addr); | ||
295 | bm_timeout--; | ||
296 | bm_status = inw(acpi_regs_addr); | ||
297 | bm_status &= 1 << 4; | ||
298 | } | ||
299 | } | ||
300 | |||
301 | if (longhaul_flags & USE_NORTHBRIDGE) { | ||
302 | /* Disable AGP and PCI arbiters */ | ||
303 | outb(3, 0x22); | ||
304 | } else if ((pr != NULL) && pr->flags.bm_control) { | ||
305 | /* Disable bus master arbitration */ | ||
306 | acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1); | ||
307 | } | ||
308 | switch (longhaul_version) { | ||
309 | |||
310 | /* | ||
311 | * Longhaul v1. (Samuel[C5A] and Samuel2 stepping 0[C5B]) | ||
312 | * Software controlled multipliers only. | ||
313 | */ | ||
314 | case TYPE_LONGHAUL_V1: | ||
315 | do_longhaul1(clock_ratio_index); | ||
316 | break; | ||
317 | |||
318 | /* | ||
319 | * Longhaul v2 appears in Samuel2 Steppings 1->7 [C5B] and Ezra [C5C] | ||
320 | * | ||
321 | * Longhaul v3 (aka Powersaver). (Ezra-T [C5M] & Nehemiah [C5N]) | ||
322 | * Nehemiah can do FSB scaling too, but this has never been proven | ||
323 | * to work in practice. | ||
324 | */ | ||
325 | case TYPE_LONGHAUL_V2: | ||
326 | case TYPE_POWERSAVER: | ||
327 | if (longhaul_flags & USE_ACPI_C3) { | ||
328 | /* Don't allow wakeup */ | ||
329 | acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0); | ||
330 | do_powersaver(cx->address, clock_ratio_index, dir); | ||
331 | } else { | ||
332 | do_powersaver(0, clock_ratio_index, dir); | ||
333 | } | ||
334 | break; | ||
335 | } | ||
336 | |||
337 | if (longhaul_flags & USE_NORTHBRIDGE) { | ||
338 | /* Enable arbiters */ | ||
339 | outb(0, 0x22); | ||
340 | } else if ((pr != NULL) && pr->flags.bm_control) { | ||
341 | /* Enable bus master arbitration */ | ||
342 | acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0); | ||
343 | } | ||
344 | outb(pic2_mask,0xA1); /* restore mask */ | ||
345 | outb(pic1_mask,0x21); | ||
346 | |||
347 | local_irq_restore(flags); | ||
348 | preempt_enable(); | ||
349 | |||
350 | freqs.new = calc_speed(longhaul_get_cpu_mult()); | ||
351 | /* Check if requested frequency is set. */ | ||
352 | if (unlikely(freqs.new != speed)) { | ||
353 | printk(KERN_INFO PFX "Failed to set requested frequency!\n"); | ||
354 | /* Revision ID = 1 but processor is expecting revision key | ||
355 | * equal to 0. Jumpers at the bottom of processor will change | ||
356 | * multiplier and FSB, but will not change bits in Longhaul | ||
357 | * MSR nor enable voltage scaling. */ | ||
358 | if (!revid_errata) { | ||
359 | printk(KERN_INFO PFX "Enabling \"Ignore Revision ID\" " | ||
360 | "option.\n"); | ||
361 | revid_errata = 1; | ||
362 | msleep(200); | ||
363 | goto retry_loop; | ||
364 | } | ||
365 | /* Why ACPI C3 sometimes doesn't work is a mystery to me. | ||
366 | * But it does happen. Processor is entering ACPI C3 state, | ||
367 | * but it doesn't change frequency. I tried poking various | ||
368 | * bits in northbridge registers, but without success. */ | ||
369 | if (longhaul_flags & USE_ACPI_C3) { | ||
370 | printk(KERN_INFO PFX "Disabling ACPI C3 support.\n"); | ||
371 | longhaul_flags &= ~USE_ACPI_C3; | ||
372 | if (revid_errata) { | ||
373 | printk(KERN_INFO PFX "Disabling \"Ignore " | ||
374 | "Revision ID\" option.\n"); | ||
375 | revid_errata = 0; | ||
376 | } | ||
377 | msleep(200); | ||
378 | goto retry_loop; | ||
379 | } | ||
380 | /* This shouldn't happen. Longhaul ver. 2 was reported not | ||
381 | * working on processors without voltage scaling, but with | ||
382 | * RevID = 1. RevID errata will make things right. Just | ||
383 | * to be 100% sure. */ | ||
384 | if (longhaul_version == TYPE_LONGHAUL_V2) { | ||
385 | printk(KERN_INFO PFX "Switching to Longhaul ver. 1\n"); | ||
386 | longhaul_version = TYPE_LONGHAUL_V1; | ||
387 | msleep(200); | ||
388 | goto retry_loop; | ||
389 | } | ||
390 | } | ||
391 | /* Report true CPU frequency */ | ||
392 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
393 | |||
394 | if (!bm_timeout) | ||
395 | printk(KERN_INFO PFX "Warning: Timeout while waiting for idle PCI bus.\n"); | ||
396 | } | ||
397 | |||
398 | /* | ||
399 | * Centaur decided to make life a little more tricky. | ||
400 | * Only longhaul v1 is allowed to read EBLCR BSEL[0:1]. | ||
401 | * Samuel2 and above have to try and guess what the FSB is. | ||
402 | * We do this by assuming we booted at maximum multiplier, and interpolate | ||
403 | * between that value multiplied by possible FSBs and cpu_mhz which | ||
404 | * was calculated at boot time. Really ugly, but no other way to do this. | ||
405 | */ | ||
406 | |||
407 | #define ROUNDING 0xf | ||
408 | |||
409 | static int guess_fsb(int mult) | ||
410 | { | ||
411 | int speed = cpu_khz / 1000; | ||
412 | int i; | ||
413 | int speeds[] = { 666, 1000, 1333, 2000 }; | ||
414 | int f_max, f_min; | ||
415 | |||
416 | for (i = 0; i < 4; i++) { | ||
417 | f_max = ((speeds[i] * mult) + 50) / 100; | ||
418 | f_max += (ROUNDING / 2); | ||
419 | f_min = f_max - ROUNDING; | ||
420 | if ((speed <= f_max) && (speed >= f_min)) | ||
421 | return speeds[i] / 10; | ||
422 | } | ||
423 | return 0; | ||
424 | } | ||
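Worked example: the speeds[] entries are FSB values times 10. For a part reporting cpu_khz = 1333000 at a 10.0x multiplier (mult = 100), the 1333 entry gives f_max = (1333 * 100 + 50) / 100 + 7 = 1340 and f_min = 1325; the measured 1333 falls inside that window, so guess_fsb() returns 133.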
425 | |||
426 | |||
427 | static int __init longhaul_get_ranges(void) | ||
428 | { | ||
429 | unsigned int i, j, k = 0; | ||
430 | unsigned int ratio; | ||
431 | int mult; | ||
432 | |||
433 | /* Get current frequency */ | ||
434 | mult = longhaul_get_cpu_mult(); | ||
435 | if (mult == -1) { | ||
436 | printk(KERN_INFO PFX "Invalid (reserved) multiplier!\n"); | ||
437 | return -EINVAL; | ||
438 | } | ||
439 | fsb = guess_fsb(mult); | ||
440 | if (fsb == 0) { | ||
441 | printk(KERN_INFO PFX "Invalid (reserved) FSB!\n"); | ||
442 | return -EINVAL; | ||
443 | } | ||
444 | /* Get max multiplier - as we always did. | ||
445 | * The Longhaul MSR is useful only when voltage scaling is enabled; | ||
446 | * the C3 boots at max anyway. */ | ||
447 | maxmult = mult; | ||
448 | /* Get min multiplier */ | ||
449 | switch (cpu_model) { | ||
450 | case CPU_NEHEMIAH: | ||
451 | minmult = 50; | ||
452 | break; | ||
453 | case CPU_NEHEMIAH_C: | ||
454 | minmult = 40; | ||
455 | break; | ||
456 | default: | ||
457 | minmult = 30; | ||
458 | break; | ||
459 | } | ||
460 | |||
461 | dprintk ("MinMult:%d.%dx MaxMult:%d.%dx\n", | ||
462 | minmult/10, minmult%10, maxmult/10, maxmult%10); | ||
463 | |||
464 | highest_speed = calc_speed(maxmult); | ||
465 | lowest_speed = calc_speed(minmult); | ||
466 | dprintk ("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb, | ||
467 | print_speed(lowest_speed/1000), | ||
468 | print_speed(highest_speed/1000)); | ||
469 | |||
470 | if (lowest_speed == highest_speed) { | ||
471 | printk (KERN_INFO PFX "highestspeed == lowest, aborting.\n"); | ||
472 | return -EINVAL; | ||
473 | } | ||
474 | if (lowest_speed > highest_speed) { | ||
475 | printk (KERN_INFO PFX "nonsense! lowest (%d > %d) !\n", | ||
476 | lowest_speed, highest_speed); | ||
477 | return -EINVAL; | ||
478 | } | ||
479 | |||
480 | longhaul_table = kmalloc((numscales + 1) * sizeof(struct cpufreq_frequency_table), GFP_KERNEL); | ||
481 | if(!longhaul_table) | ||
482 | return -ENOMEM; | ||
483 | |||
484 | for (j = 0; j < numscales; j++) { | ||
485 | ratio = clock_ratio[j]; | ||
486 | if (ratio == -1) | ||
487 | continue; | ||
488 | if (ratio > maxmult || ratio < minmult) | ||
489 | continue; | ||
490 | longhaul_table[k].frequency = calc_speed(ratio); | ||
491 | longhaul_table[k].index = j; | ||
492 | k++; | ||
493 | } | ||
494 | if (k <= 1) { | ||
495 | kfree(longhaul_table); | ||
496 | return -ENODEV; | ||
497 | } | ||
498 | /* Sort */ | ||
499 | for (j = 0; j < k - 1; j++) { | ||
500 | unsigned int min_f, min_i; | ||
501 | min_f = longhaul_table[j].frequency; | ||
502 | min_i = j; | ||
503 | for (i = j + 1; i < k; i++) { | ||
504 | if (longhaul_table[i].frequency < min_f) { | ||
505 | min_f = longhaul_table[i].frequency; | ||
506 | min_i = i; | ||
507 | } | ||
508 | } | ||
509 | if (min_i != j) { | ||
510 | unsigned int temp; | ||
511 | temp = longhaul_table[j].frequency; | ||
512 | longhaul_table[j].frequency = longhaul_table[min_i].frequency; | ||
513 | longhaul_table[min_i].frequency = temp; | ||
514 | temp = longhaul_table[j].index; | ||
515 | longhaul_table[j].index = longhaul_table[min_i].index; | ||
516 | longhaul_table[min_i].index = temp; | ||
517 | } | ||
518 | } | ||
519 | |||
520 | longhaul_table[k].frequency = CPUFREQ_TABLE_END; | ||
521 | |||
522 | /* Find index we are running on */ | ||
523 | for (j = 0; j < k; j++) { | ||
524 | if (clock_ratio[longhaul_table[j].index & 0x1f] == mult) { | ||
525 | longhaul_index = j; | ||
526 | break; | ||
527 | } | ||
528 | } | ||
529 | return 0; | ||
530 | } | ||
531 | |||
532 | |||
533 | static void __init longhaul_setup_voltagescaling(void) | ||
534 | { | ||
535 | union msr_longhaul longhaul; | ||
536 | struct mV_pos minvid, maxvid, vid; | ||
537 | unsigned int j, speed, pos, kHz_step, numvscales; | ||
538 | int min_vid_speed; | ||
539 | |||
540 | rdmsrl(MSR_VIA_LONGHAUL, longhaul.val); | ||
541 | if (!(longhaul.bits.RevisionID & 1)) { | ||
542 | printk(KERN_INFO PFX "Voltage scaling not supported by CPU.\n"); | ||
543 | return; | ||
544 | } | ||
545 | |||
546 | if (!longhaul.bits.VRMRev) { | ||
547 | printk(KERN_INFO PFX "VRM 8.5\n"); | ||
548 | vrm_mV_table = &vrm85_mV[0]; | ||
549 | mV_vrm_table = &mV_vrm85[0]; | ||
550 | } else { | ||
551 | printk(KERN_INFO PFX "Mobile VRM\n"); | ||
552 | if (cpu_model < CPU_NEHEMIAH) | ||
553 | return; | ||
554 | vrm_mV_table = &mobilevrm_mV[0]; | ||
555 | mV_vrm_table = &mV_mobilevrm[0]; | ||
556 | } | ||
557 | |||
558 | minvid = vrm_mV_table[longhaul.bits.MinimumVID]; | ||
559 | maxvid = vrm_mV_table[longhaul.bits.MaximumVID]; | ||
560 | |||
561 | if (minvid.mV == 0 || maxvid.mV == 0 || minvid.mV > maxvid.mV) { | ||
562 | printk (KERN_INFO PFX "Bogus values Min:%d.%03d Max:%d.%03d. " | ||
563 | "Voltage scaling disabled.\n", | ||
564 | minvid.mV/1000, minvid.mV%1000, maxvid.mV/1000, maxvid.mV%1000); | ||
565 | return; | ||
566 | } | ||
567 | |||
568 | if (minvid.mV == maxvid.mV) { | ||
569 | printk (KERN_INFO PFX "Claims to support voltage scaling but min & max are " | ||
570 | "both %d.%03d. Voltage scaling disabled\n", | ||
571 | maxvid.mV/1000, maxvid.mV%1000); | ||
572 | return; | ||
573 | } | ||
574 | |||
575 | /* How many voltage steps */ | ||
576 | numvscales = maxvid.pos - minvid.pos + 1; | ||
577 | printk(KERN_INFO PFX | ||
578 | "Max VID=%d.%03d " | ||
579 | "Min VID=%d.%03d, " | ||
580 | "%d possible voltage scales\n", | ||
581 | maxvid.mV/1000, maxvid.mV%1000, | ||
582 | minvid.mV/1000, minvid.mV%1000, | ||
583 | numvscales); | ||
584 | |||
585 | /* Calculate max frequency at min voltage */ | ||
586 | j = longhaul.bits.MinMHzBR; | ||
587 | if (longhaul.bits.MinMHzBR4) | ||
588 | j += 16; | ||
589 | min_vid_speed = eblcr_table[j]; | ||
590 | if (min_vid_speed == -1) | ||
591 | return; | ||
592 | switch (longhaul.bits.MinMHzFSB) { | ||
593 | case 0: | ||
594 | min_vid_speed *= 13333; | ||
595 | break; | ||
596 | case 1: | ||
597 | min_vid_speed *= 10000; | ||
598 | break; | ||
599 | case 3: | ||
600 | min_vid_speed *= 6666; | ||
601 | break; | ||
602 | default: | ||
603 | return; | ||
604 | break; | ||
605 | } | ||
606 | if (min_vid_speed >= highest_speed) | ||
607 | return; | ||
608 | /* Calculate kHz for one voltage step */ | ||
609 | kHz_step = (highest_speed - min_vid_speed) / numvscales; | ||
610 | |||
611 | j = 0; | ||
612 | while (longhaul_table[j].frequency != CPUFREQ_TABLE_END) { | ||
613 | speed = longhaul_table[j].frequency; | ||
614 | if (speed > min_vid_speed) | ||
615 | pos = (speed - min_vid_speed) / kHz_step + minvid.pos; | ||
616 | else | ||
617 | pos = minvid.pos; | ||
618 | longhaul_table[j].index |= mV_vrm_table[pos] << 8; | ||
619 | vid = vrm_mV_table[mV_vrm_table[pos]]; | ||
620 | printk(KERN_INFO PFX "f: %d kHz, index: %d, vid: %d mV\n", speed, j, vid.mV); | ||
621 | j++; | ||
622 | } | ||
623 | |||
624 | can_scale_voltage = 1; | ||
625 | printk(KERN_INFO PFX "Voltage scaling enabled.\n"); | ||
626 | } | ||
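For illustration, the frequency-to-VID mapping computed by longhaul_setup_voltagescaling() can be reproduced in a standalone sketch. All numbers below (frequencies, step count, minimum VID position) are hypothetical examples, not values read from real hardware or from this driver:

#include <stdio.h>

/* Standalone sketch of the frequency -> VID-position mapping above.
 * Every constant here is a made-up example value. */
int main(void)
{
	unsigned int highest_speed = 1000000;   /* kHz */
	unsigned int min_vid_speed =  600000;   /* kHz, max frequency at min voltage */
	unsigned int numvscales = 8;            /* number of voltage steps */
	unsigned int minvid_pos = 2;            /* position of the minimum VID */
	unsigned int kHz_step = (highest_speed - min_vid_speed) / numvscales;
	unsigned int speed, pos;

	for (speed = 500000; speed <= 1000000; speed += 100000) {
		if (speed > min_vid_speed)
			pos = (speed - min_vid_speed) / kHz_step + minvid_pos;
		else
			pos = minvid_pos;
		printf("f: %u kHz -> VID position %u\n", speed, pos);
	}
	return 0;
}

With these numbers each voltage step covers 50000 kHz, so an 800000 kHz table entry lands four positions above the minimum VID.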
627 | |||
628 | |||
629 | static int longhaul_verify(struct cpufreq_policy *policy) | ||
630 | { | ||
631 | return cpufreq_frequency_table_verify(policy, longhaul_table); | ||
632 | } | ||
633 | |||
634 | |||
635 | static int longhaul_target(struct cpufreq_policy *policy, | ||
636 | unsigned int target_freq, unsigned int relation) | ||
637 | { | ||
638 | unsigned int table_index = 0; | ||
639 | unsigned int i; | ||
640 | unsigned int dir = 0; | ||
641 | u8 vid, current_vid; | ||
642 | |||
643 | if (cpufreq_frequency_table_target(policy, longhaul_table, target_freq, relation, &table_index)) | ||
644 | return -EINVAL; | ||
645 | |||
646 | /* Don't set same frequency again */ | ||
647 | if (longhaul_index == table_index) | ||
648 | return 0; | ||
649 | |||
650 | if (!can_scale_voltage) | ||
651 | longhaul_setstate(table_index); | ||
652 | else { | ||
653 | /* On the test system, voltage transitions exceeding a single | ||
654 | * step up or down were turning the motherboard off. Both | ||
655 | * "ondemand" and "userspace" are unsafe. C7 does this | ||
656 | * in hardware; C3 is old, so we need to do it | ||
657 | * in software. */ | ||
658 | i = longhaul_index; | ||
659 | current_vid = (longhaul_table[longhaul_index].index >> 8) & 0x1f; | ||
660 | if (table_index > longhaul_index) | ||
661 | dir = 1; | ||
662 | while (i != table_index) { | ||
663 | vid = (longhaul_table[i].index >> 8) & 0x1f; | ||
664 | if (vid != current_vid) { | ||
665 | longhaul_setstate(i); | ||
666 | current_vid = vid; | ||
667 | msleep(200); | ||
668 | } | ||
669 | if (dir) | ||
670 | i++; | ||
671 | else | ||
672 | i--; | ||
673 | } | ||
674 | longhaul_setstate(table_index); | ||
675 | } | ||
676 | longhaul_index = table_index; | ||
677 | return 0; | ||
678 | } | ||
679 | |||
680 | |||
681 | static unsigned int longhaul_get(unsigned int cpu) | ||
682 | { | ||
683 | if (cpu) | ||
684 | return 0; | ||
685 | return calc_speed(longhaul_get_cpu_mult()); | ||
686 | } | ||
687 | |||
688 | static acpi_status longhaul_walk_callback(acpi_handle obj_handle, | ||
689 | u32 nesting_level, | ||
690 | void *context, void **return_value) | ||
691 | { | ||
692 | struct acpi_device *d; | ||
693 | |||
694 | if ( acpi_bus_get_device(obj_handle, &d) ) { | ||
695 | return 0; | ||
696 | } | ||
697 | *return_value = (void *)acpi_driver_data(d); | ||
698 | return 1; | ||
699 | } | ||
700 | |||
701 | /* VIA doesn't support the PM2 register, but has something similar */ | ||
702 | static int enable_arbiter_disable(void) | ||
703 | { | ||
704 | struct pci_dev *dev; | ||
705 | int status = 1; | ||
706 | int reg; | ||
707 | u8 pci_cmd; | ||
708 | |||
709 | /* Find PLE133 host bridge */ | ||
710 | reg = 0x78; | ||
711 | dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8601_0, | ||
712 | NULL); | ||
713 | /* Find CLE266 host bridge */ | ||
714 | if (dev == NULL) { | ||
715 | reg = 0x76; | ||
716 | dev = pci_get_device(PCI_VENDOR_ID_VIA, | ||
717 | PCI_DEVICE_ID_VIA_862X_0, NULL); | ||
718 | /* Find CN400 V-Link host bridge */ | ||
719 | if (dev == NULL) | ||
720 | dev = pci_get_device(PCI_VENDOR_ID_VIA, 0x7259, NULL); | ||
721 | } | ||
722 | if (dev != NULL) { | ||
723 | /* Enable access to port 0x22 */ | ||
724 | pci_read_config_byte(dev, reg, &pci_cmd); | ||
725 | if (!(pci_cmd & 1<<7)) { | ||
726 | pci_cmd |= 1<<7; | ||
727 | pci_write_config_byte(dev, reg, pci_cmd); | ||
728 | pci_read_config_byte(dev, reg, &pci_cmd); | ||
729 | if (!(pci_cmd & 1<<7)) { | ||
730 | printk(KERN_ERR PFX | ||
731 | "Can't enable access to port 0x22.\n"); | ||
732 | status = 0; | ||
733 | } | ||
734 | } | ||
735 | pci_dev_put(dev); | ||
736 | return status; | ||
737 | } | ||
738 | return 0; | ||
739 | } | ||
740 | |||
741 | static int longhaul_setup_southbridge(void) | ||
742 | { | ||
743 | struct pci_dev *dev; | ||
744 | u8 pci_cmd; | ||
745 | |||
746 | /* Find VT8235 southbridge */ | ||
747 | dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, NULL); | ||
748 | if (dev == NULL) | ||
749 | /* Find VT8237 southbridge */ | ||
750 | dev = pci_get_device(PCI_VENDOR_ID_VIA, | ||
751 | PCI_DEVICE_ID_VIA_8237, NULL); | ||
752 | if (dev != NULL) { | ||
753 | /* Set transition time to max */ | ||
754 | pci_read_config_byte(dev, 0xec, &pci_cmd); | ||
755 | pci_cmd &= ~(1 << 2); | ||
756 | pci_write_config_byte(dev, 0xec, pci_cmd); | ||
757 | pci_read_config_byte(dev, 0xe4, &pci_cmd); | ||
758 | pci_cmd &= ~(1 << 7); | ||
759 | pci_write_config_byte(dev, 0xe4, pci_cmd); | ||
760 | pci_read_config_byte(dev, 0xe5, &pci_cmd); | ||
761 | pci_cmd |= 1 << 7; | ||
762 | pci_write_config_byte(dev, 0xe5, pci_cmd); | ||
763 | /* Get address of ACPI registers block*/ | ||
764 | pci_read_config_byte(dev, 0x81, &pci_cmd); | ||
765 | if (pci_cmd & 1 << 7) { | ||
766 | pci_read_config_dword(dev, 0x88, &acpi_regs_addr); | ||
767 | acpi_regs_addr &= 0xff00; | ||
768 | printk(KERN_INFO PFX "ACPI I/O at 0x%x\n", acpi_regs_addr); | ||
769 | } | ||
770 | |||
771 | pci_dev_put(dev); | ||
772 | return 1; | ||
773 | } | ||
774 | return 0; | ||
775 | } | ||
776 | |||
777 | static int __init longhaul_cpu_init(struct cpufreq_policy *policy) | ||
778 | { | ||
779 | struct cpuinfo_x86 *c = cpu_data; | ||
780 | char *cpuname=NULL; | ||
781 | int ret; | ||
782 | u32 lo, hi; | ||
783 | |||
784 | /* Check what we have on this motherboard */ | ||
785 | switch (c->x86_model) { | ||
786 | case 6: | ||
787 | cpu_model = CPU_SAMUEL; | ||
788 | cpuname = "C3 'Samuel' [C5A]"; | ||
789 | longhaul_version = TYPE_LONGHAUL_V1; | ||
790 | memcpy (clock_ratio, samuel1_clock_ratio, sizeof(samuel1_clock_ratio)); | ||
791 | memcpy (eblcr_table, samuel1_eblcr, sizeof(samuel1_eblcr)); | ||
792 | break; | ||
793 | |||
794 | case 7: | ||
795 | switch (c->x86_mask) { | ||
796 | case 0: | ||
797 | longhaul_version = TYPE_LONGHAUL_V1; | ||
798 | cpu_model = CPU_SAMUEL2; | ||
799 | cpuname = "C3 'Samuel 2' [C5B]"; | ||
800 | /* Note: this is not a typo; early Samuel2 steppings had | ||
801 | * Samuel1 ratios. */ | ||
802 | memcpy(clock_ratio, samuel1_clock_ratio, | ||
803 | sizeof(samuel1_clock_ratio)); | ||
804 | memcpy(eblcr_table, samuel2_eblcr, | ||
805 | sizeof(samuel2_eblcr)); | ||
806 | break; | ||
807 | case 1 ... 15: | ||
808 | longhaul_version = TYPE_LONGHAUL_V1; | ||
809 | if (c->x86_mask < 8) { | ||
810 | cpu_model = CPU_SAMUEL2; | ||
811 | cpuname = "C3 'Samuel 2' [C5B]"; | ||
812 | } else { | ||
813 | cpu_model = CPU_EZRA; | ||
814 | cpuname = "C3 'Ezra' [C5C]"; | ||
815 | } | ||
816 | memcpy(clock_ratio, ezra_clock_ratio, | ||
817 | sizeof(ezra_clock_ratio)); | ||
818 | memcpy(eblcr_table, ezra_eblcr, | ||
819 | sizeof(ezra_eblcr)); | ||
820 | break; | ||
821 | } | ||
822 | break; | ||
823 | |||
824 | case 8: | ||
825 | cpu_model = CPU_EZRA_T; | ||
826 | cpuname = "C3 'Ezra-T' [C5M]"; | ||
827 | longhaul_version = TYPE_POWERSAVER; | ||
828 | numscales=32; | ||
829 | memcpy (clock_ratio, ezrat_clock_ratio, sizeof(ezrat_clock_ratio)); | ||
830 | memcpy (eblcr_table, ezrat_eblcr, sizeof(ezrat_eblcr)); | ||
831 | break; | ||
832 | |||
833 | case 9: | ||
834 | longhaul_version = TYPE_POWERSAVER; | ||
835 | numscales = 32; | ||
836 | memcpy(clock_ratio, | ||
837 | nehemiah_clock_ratio, | ||
838 | sizeof(nehemiah_clock_ratio)); | ||
839 | memcpy(eblcr_table, nehemiah_eblcr, sizeof(nehemiah_eblcr)); | ||
840 | switch (c->x86_mask) { | ||
841 | case 0 ... 1: | ||
842 | cpu_model = CPU_NEHEMIAH; | ||
843 | cpuname = "C3 'Nehemiah A' [C5XLOE]"; | ||
844 | break; | ||
845 | case 2 ... 4: | ||
846 | cpu_model = CPU_NEHEMIAH; | ||
847 | cpuname = "C3 'Nehemiah B' [C5XLOH]"; | ||
848 | break; | ||
849 | case 5 ... 15: | ||
850 | cpu_model = CPU_NEHEMIAH_C; | ||
851 | cpuname = "C3 'Nehemiah C' [C5P]"; | ||
852 | break; | ||
853 | } | ||
854 | break; | ||
855 | |||
856 | default: | ||
857 | cpuname = "Unknown"; | ||
858 | break; | ||
859 | } | ||
860 | /* Check Longhaul ver. 2 */ | ||
861 | if (longhaul_version == TYPE_LONGHAUL_V2) { | ||
862 | rdmsr(MSR_VIA_LONGHAUL, lo, hi); | ||
863 | if (lo == 0 && hi == 0) | ||
864 | /* Looks like MSR isn't present */ | ||
865 | longhaul_version = TYPE_LONGHAUL_V1; | ||
866 | } | ||
867 | |||
868 | printk (KERN_INFO PFX "VIA %s CPU detected. ", cpuname); | ||
869 | switch (longhaul_version) { | ||
870 | case TYPE_LONGHAUL_V1: | ||
871 | case TYPE_LONGHAUL_V2: | ||
872 | printk ("Longhaul v%d supported.\n", longhaul_version); | ||
873 | break; | ||
874 | case TYPE_POWERSAVER: | ||
875 | printk ("Powersaver supported.\n"); | ||
876 | break; | ||
877 | } | ||
878 | |||
879 | /* Doesn't hurt */ | ||
880 | longhaul_setup_southbridge(); | ||
881 | |||
882 | /* Find ACPI data for processor */ | ||
883 | acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, | ||
884 | ACPI_UINT32_MAX, &longhaul_walk_callback, | ||
885 | NULL, (void *)&pr); | ||
886 | |||
887 | /* Check ACPI support for C3 state */ | ||
888 | if (pr != NULL && longhaul_version == TYPE_POWERSAVER) { | ||
889 | cx = &pr->power.states[ACPI_STATE_C3]; | ||
890 | if (cx->address > 0 && cx->latency <= 1000) | ||
891 | longhaul_flags |= USE_ACPI_C3; | ||
892 | } | ||
893 | /* Disable if it isn't working */ | ||
894 | if (disable_acpi_c3) | ||
895 | longhaul_flags &= ~USE_ACPI_C3; | ||
896 | /* Check if northbridge is friendly */ | ||
897 | if (enable_arbiter_disable()) | ||
898 | longhaul_flags |= USE_NORTHBRIDGE; | ||
899 | |||
900 | /* Check ACPI support for bus master arbiter disable */ | ||
901 | if (!(longhaul_flags & USE_ACPI_C3 | ||
902 | || longhaul_flags & USE_NORTHBRIDGE) | ||
903 | && ((pr == NULL) || !(pr->flags.bm_control))) { | ||
904 | printk(KERN_ERR PFX | ||
905 | "No ACPI support. Unsupported northbridge.\n"); | ||
906 | return -ENODEV; | ||
907 | } | ||
908 | |||
909 | if (longhaul_flags & USE_NORTHBRIDGE) | ||
910 | printk(KERN_INFO PFX "Using northbridge support.\n"); | ||
911 | if (longhaul_flags & USE_ACPI_C3) | ||
912 | printk(KERN_INFO PFX "Using ACPI support.\n"); | ||
913 | |||
914 | ret = longhaul_get_ranges(); | ||
915 | if (ret != 0) | ||
916 | return ret; | ||
917 | |||
918 | if ((longhaul_version != TYPE_LONGHAUL_V1) && (scale_voltage != 0)) | ||
919 | longhaul_setup_voltagescaling(); | ||
920 | |||
921 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
922 | policy->cpuinfo.transition_latency = 200000; /* nsec */ | ||
923 | policy->cur = calc_speed(longhaul_get_cpu_mult()); | ||
924 | |||
925 | ret = cpufreq_frequency_table_cpuinfo(policy, longhaul_table); | ||
926 | if (ret) | ||
927 | return ret; | ||
928 | |||
929 | cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu); | ||
930 | |||
931 | return 0; | ||
932 | } | ||
933 | |||
934 | static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy) | ||
935 | { | ||
936 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
937 | return 0; | ||
938 | } | ||
939 | |||
940 | static struct freq_attr* longhaul_attr[] = { | ||
941 | &cpufreq_freq_attr_scaling_available_freqs, | ||
942 | NULL, | ||
943 | }; | ||
944 | |||
945 | static struct cpufreq_driver longhaul_driver = { | ||
946 | .verify = longhaul_verify, | ||
947 | .target = longhaul_target, | ||
948 | .get = longhaul_get, | ||
949 | .init = longhaul_cpu_init, | ||
950 | .exit = __devexit_p(longhaul_cpu_exit), | ||
951 | .name = "longhaul", | ||
952 | .owner = THIS_MODULE, | ||
953 | .attr = longhaul_attr, | ||
954 | }; | ||
955 | |||
956 | |||
957 | static int __init longhaul_init(void) | ||
958 | { | ||
959 | struct cpuinfo_x86 *c = cpu_data; | ||
960 | |||
961 | if (c->x86_vendor != X86_VENDOR_CENTAUR || c->x86 != 6) | ||
962 | return -ENODEV; | ||
963 | |||
964 | #ifdef CONFIG_SMP | ||
965 | if (num_online_cpus() > 1) { | ||
966 | printk(KERN_ERR PFX "More than 1 CPU detected, longhaul disabled.\n"); | ||
967 | return -ENODEV; | ||
968 | } | ||
969 | #endif | ||
970 | #ifdef CONFIG_X86_IO_APIC | ||
971 | if (cpu_has_apic) { | ||
972 | printk(KERN_ERR PFX "APIC detected. Longhaul is currently broken in this configuration.\n"); | ||
973 | return -ENODEV; | ||
974 | } | ||
975 | #endif | ||
976 | switch (c->x86_model) { | ||
977 | case 6 ... 9: | ||
978 | return cpufreq_register_driver(&longhaul_driver); | ||
979 | case 10: | ||
980 | printk(KERN_ERR PFX "Use acpi-cpufreq driver for VIA C7\n"); | ||
981 | default: | ||
982 | break; | ||
983 | } | ||
984 | |||
985 | return -ENODEV; | ||
986 | } | ||
987 | |||
988 | |||
989 | static void __exit longhaul_exit(void) | ||
990 | { | ||
991 | int i; | ||
992 | |||
993 | for (i=0; i < numscales; i++) { | ||
994 | if (clock_ratio[i] == maxmult) { | ||
995 | longhaul_setstate(i); | ||
996 | break; | ||
997 | } | ||
998 | } | ||
999 | |||
1000 | cpufreq_unregister_driver(&longhaul_driver); | ||
1001 | kfree(longhaul_table); | ||
1002 | } | ||
1003 | |||
1004 | /* Even if the BIOS exports an ACPI C3 state, and it is used | ||
1005 | * successfully when the CPU is idle, in some cases this state | ||
1006 | * doesn't trigger the frequency transition. */ | ||
1007 | module_param (disable_acpi_c3, int, 0644); | ||
1008 | MODULE_PARM_DESC(disable_acpi_c3, "Don't use ACPI C3 support"); | ||
1009 | /* Change CPU voltage with frequency. Very useful for saving | ||
1010 | * power, but most VIA C3 processors don't support it. */ | ||
1011 | module_param (scale_voltage, int, 0644); | ||
1012 | MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor"); | ||
1013 | /* Force revision key to 0 for processors which don't | ||
1014 | * support voltage scaling but advertise themselves as | ||
1015 | * such. */ | ||
1016 | module_param(revid_errata, int, 0644); | ||
1017 | MODULE_PARM_DESC(revid_errata, "Ignore CPU Revision ID"); | ||
1018 | |||
1019 | MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>"); | ||
1020 | MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors."); | ||
1021 | MODULE_LICENSE ("GPL"); | ||
1022 | |||
1023 | late_initcall(longhaul_init); | ||
1024 | module_exit(longhaul_exit); | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.h b/arch/x86/kernel/cpu/cpufreq/longhaul.h new file mode 100644 index 000000000000..4fcc320997df --- /dev/null +++ b/arch/x86/kernel/cpu/cpufreq/longhaul.h | |||
@@ -0,0 +1,353 @@ | |||
1 | /* | ||
2 | * longhaul.h | ||
3 | * (C) 2003 Dave Jones. | ||
4 | * | ||
5 | * Licensed under the terms of the GNU GPL License version 2. | ||
6 | * | ||
7 | * VIA-specific information | ||
8 | */ | ||
9 | |||
10 | union msr_bcr2 { | ||
11 | struct { | ||
12 | unsigned Reserved:19, // 18:0 | ||
13 | ESOFTBF:1, // 19 | ||
14 | Reserved2:3, // 22:20 | ||
15 | CLOCKMUL:4, // 26:23 | ||
16 | Reserved3:5; // 31:27 | ||
17 | } bits; | ||
18 | unsigned long val; | ||
19 | }; | ||
20 | |||
21 | union msr_longhaul { | ||
22 | struct { | ||
23 | unsigned RevisionID:4, // 3:0 | ||
24 | RevisionKey:4, // 7:4 | ||
25 | EnableSoftBusRatio:1, // 8 | ||
26 | EnableSoftVID:1, // 9 | ||
27 | EnableSoftBSEL:1, // 10 | ||
28 | Reserved:3, // 13:11 | ||
29 | SoftBusRatio4:1, // 14 | ||
30 | VRMRev:1, // 15 | ||
31 | SoftBusRatio:4, // 19:16 | ||
32 | SoftVID:5, // 24:20 | ||
33 | Reserved2:3, // 27:25 | ||
34 | SoftBSEL:2, // 29:28 | ||
35 | Reserved3:2, // 31:30 | ||
36 | MaxMHzBR:4, // 35:32 | ||
37 | MaximumVID:5, // 40:36 | ||
38 | MaxMHzFSB:2, // 42:41 | ||
39 | MaxMHzBR4:1, // 43 | ||
40 | Reserved4:4, // 47:44 | ||
41 | MinMHzBR:4, // 51:48 | ||
42 | MinimumVID:5, // 56:52 | ||
43 | MinMHzFSB:2, // 58:57 | ||
44 | MinMHzBR4:1, // 59 | ||
45 | Reserved5:4; // 63:60 | ||
46 | } bits; | ||
47 | unsigned long long val; | ||
48 | }; | ||
49 | |||
50 | /* | ||
51 | * Clock ratio tables. Div/Mod by 10 to get ratio. | ||
52 | * The eblcr ones specify the ratio read from the CPU. | ||
53 | * The clock_ratio ones specify what to write to the CPU. | ||
54 | */ | ||
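To make the encoding concrete: a table value of 45 means a 4.5x multiplier, and -1 marks a reserved code. A minimal standalone sketch (the 133 MHz front-side bus is a hypothetical example, and the kHz line simply assumes core clock = FSB * ratio):

#include <stdio.h>

/* Sketch: decode one clock_ratio/eblcr table value (multiplier * 10). */
int main(void)
{
	int table_value = 45;           /* 4.5x, as stored in the tables */
	unsigned int fsb_mhz = 133;     /* hypothetical front-side bus */

	if (table_value == -1) {
		printf("reserved code\n");
		return 0;
	}
	printf("multiplier: %d.%dx\n", table_value / 10, table_value % 10);
	printf("core clock: ~%u kHz\n", fsb_mhz * table_value * 100);
	return 0;
}

Here 133 MHz at 4.5x works out to roughly 598500 kHz.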
55 | |||
56 | /* | ||
57 | * VIA C3 Samuel 1 & Samuel 2 (stepping 0) | ||
58 | */ | ||
59 | static const int __initdata samuel1_clock_ratio[16] = { | ||
60 | -1, /* 0000 -> RESERVED */ | ||
61 | 30, /* 0001 -> 3.0x */ | ||
62 | 40, /* 0010 -> 4.0x */ | ||
63 | -1, /* 0011 -> RESERVED */ | ||
64 | -1, /* 0100 -> RESERVED */ | ||
65 | 35, /* 0101 -> 3.5x */ | ||
66 | 45, /* 0110 -> 4.5x */ | ||
67 | 55, /* 0111 -> 5.5x */ | ||
68 | 60, /* 1000 -> 6.0x */ | ||
69 | 70, /* 1001 -> 7.0x */ | ||
70 | 80, /* 1010 -> 8.0x */ | ||
71 | 50, /* 1011 -> 5.0x */ | ||
72 | 65, /* 1100 -> 6.5x */ | ||
73 | 75, /* 1101 -> 7.5x */ | ||
74 | -1, /* 1110 -> RESERVED */ | ||
75 | -1, /* 1111 -> RESERVED */ | ||
76 | }; | ||
77 | |||
78 | static const int __initdata samuel1_eblcr[16] = { | ||
79 | 50, /* 0000 -> RESERVED */ | ||
80 | 30, /* 0001 -> 3.0x */ | ||
81 | 40, /* 0010 -> 4.0x */ | ||
82 | -1, /* 0011 -> RESERVED */ | ||
83 | 55, /* 0100 -> 5.5x */ | ||
84 | 35, /* 0101 -> 3.5x */ | ||
85 | 45, /* 0110 -> 4.5x */ | ||
86 | -1, /* 0111 -> RESERVED */ | ||
87 | -1, /* 1000 -> RESERVED */ | ||
88 | 70, /* 1001 -> 7.0x */ | ||
89 | 80, /* 1010 -> 8.0x */ | ||
90 | 60, /* 1011 -> 6.0x */ | ||
91 | -1, /* 1100 -> RESERVED */ | ||
92 | 75, /* 1101 -> 7.5x */ | ||
93 | -1, /* 1110 -> RESERVED */ | ||
94 | 65, /* 1111 -> 6.5x */ | ||
95 | }; | ||
96 | |||
97 | /* | ||
98 | * VIA C3 Samuel2 Stepping 1->15 | ||
99 | */ | ||
100 | static const int __initdata samuel2_eblcr[16] = { | ||
101 | 50, /* 0000 -> 5.0x */ | ||
102 | 30, /* 0001 -> 3.0x */ | ||
103 | 40, /* 0010 -> 4.0x */ | ||
104 | 100, /* 0011 -> 10.0x */ | ||
105 | 55, /* 0100 -> 5.5x */ | ||
106 | 35, /* 0101 -> 3.5x */ | ||
107 | 45, /* 0110 -> 4.5x */ | ||
108 | 110, /* 0111 -> 11.0x */ | ||
109 | 90, /* 1000 -> 9.0x */ | ||
110 | 70, /* 1001 -> 7.0x */ | ||
111 | 80, /* 1010 -> 8.0x */ | ||
112 | 60, /* 1011 -> 6.0x */ | ||
113 | 120, /* 1100 -> 12.0x */ | ||
114 | 75, /* 1101 -> 7.5x */ | ||
115 | 130, /* 1110 -> 13.0x */ | ||
116 | 65, /* 1111 -> 6.5x */ | ||
117 | }; | ||
118 | |||
119 | /* | ||
120 | * VIA C3 Ezra | ||
121 | */ | ||
122 | static const int __initdata ezra_clock_ratio[16] = { | ||
123 | 100, /* 0000 -> 10.0x */ | ||
124 | 30, /* 0001 -> 3.0x */ | ||
125 | 40, /* 0010 -> 4.0x */ | ||
126 | 90, /* 0011 -> 9.0x */ | ||
127 | 95, /* 0100 -> 9.5x */ | ||
128 | 35, /* 0101 -> 3.5x */ | ||
129 | 45, /* 0110 -> 4.5x */ | ||
130 | 55, /* 0111 -> 5.5x */ | ||
131 | 60, /* 1000 -> 6.0x */ | ||
132 | 70, /* 1001 -> 7.0x */ | ||
133 | 80, /* 1010 -> 8.0x */ | ||
134 | 50, /* 1011 -> 5.0x */ | ||
135 | 65, /* 1100 -> 6.5x */ | ||
136 | 75, /* 1101 -> 7.5x */ | ||
137 | 85, /* 1110 -> 8.5x */ | ||
138 | 120, /* 1111 -> 12.0x */ | ||
139 | }; | ||
140 | |||
141 | static const int __initdata ezra_eblcr[16] = { | ||
142 | 50, /* 0000 -> 5.0x */ | ||
143 | 30, /* 0001 -> 3.0x */ | ||
144 | 40, /* 0010 -> 4.0x */ | ||
145 | 100, /* 0011 -> 10.0x */ | ||
146 | 55, /* 0100 -> 5.5x */ | ||
147 | 35, /* 0101 -> 3.5x */ | ||
148 | 45, /* 0110 -> 4.5x */ | ||
149 | 95, /* 0111 -> 9.5x */ | ||
150 | 90, /* 1000 -> 9.0x */ | ||
151 | 70, /* 1001 -> 7.0x */ | ||
152 | 80, /* 1010 -> 8.0x */ | ||
153 | 60, /* 1011 -> 6.0x */ | ||
154 | 120, /* 1100 -> 12.0x */ | ||
155 | 75, /* 1101 -> 7.5x */ | ||
156 | 85, /* 1110 -> 8.5x */ | ||
157 | 65, /* 1111 -> 6.5x */ | ||
158 | }; | ||
159 | |||
160 | /* | ||
161 | * VIA C3 (Ezra-T) [C5M]. | ||
162 | */ | ||
163 | static const int __initdata ezrat_clock_ratio[32] = { | ||
164 | 100, /* 0000 -> 10.0x */ | ||
165 | 30, /* 0001 -> 3.0x */ | ||
166 | 40, /* 0010 -> 4.0x */ | ||
167 | 90, /* 0011 -> 9.0x */ | ||
168 | 95, /* 0100 -> 9.5x */ | ||
169 | 35, /* 0101 -> 3.5x */ | ||
170 | 45, /* 0110 -> 4.5x */ | ||
171 | 55, /* 0111 -> 5.5x */ | ||
172 | 60, /* 1000 -> 6.0x */ | ||
173 | 70, /* 1001 -> 7.0x */ | ||
174 | 80, /* 1010 -> 8.0x */ | ||
175 | 50, /* 1011 -> 5.0x */ | ||
176 | 65, /* 1100 -> 6.5x */ | ||
177 | 75, /* 1101 -> 7.5x */ | ||
178 | 85, /* 1110 -> 8.5x */ | ||
179 | 120, /* 1111 -> 12.0x */ | ||
180 | |||
181 | -1, /* 0000 -> RESERVED (10.0x) */ | ||
182 | 110, /* 0001 -> 11.0x */ | ||
183 | -1, /* 0010 -> 12.0x */ | ||
184 | -1, /* 0011 -> RESERVED (9.0x)*/ | ||
185 | 105, /* 0100 -> 10.5x */ | ||
186 | 115, /* 0101 -> 11.5x */ | ||
187 | 125, /* 0110 -> 12.5x */ | ||
188 | 135, /* 0111 -> 13.5x */ | ||
189 | 140, /* 1000 -> 14.0x */ | ||
190 | 150, /* 1001 -> 15.0x */ | ||
191 | 160, /* 1010 -> 16.0x */ | ||
192 | 130, /* 1011 -> 13.0x */ | ||
193 | 145, /* 1100 -> 14.5x */ | ||
194 | 155, /* 1101 -> 15.5x */ | ||
195 | -1, /* 1110 -> RESERVED (13.0x) */ | ||
196 | -1, /* 1111 -> RESERVED (12.0x) */ | ||
197 | }; | ||
198 | |||
199 | static const int __initdata ezrat_eblcr[32] = { | ||
200 | 50, /* 0000 -> 5.0x */ | ||
201 | 30, /* 0001 -> 3.0x */ | ||
202 | 40, /* 0010 -> 4.0x */ | ||
203 | 100, /* 0011 -> 10.0x */ | ||
204 | 55, /* 0100 -> 5.5x */ | ||
205 | 35, /* 0101 -> 3.5x */ | ||
206 | 45, /* 0110 -> 4.5x */ | ||
207 | 95, /* 0111 -> 9.5x */ | ||
208 | 90, /* 1000 -> 9.0x */ | ||
209 | 70, /* 1001 -> 7.0x */ | ||
210 | 80, /* 1010 -> 8.0x */ | ||
211 | 60, /* 1011 -> 6.0x */ | ||
212 | 120, /* 1100 -> 12.0x */ | ||
213 | 75, /* 1101 -> 7.5x */ | ||
214 | 85, /* 1110 -> 8.5x */ | ||
215 | 65, /* 1111 -> 6.5x */ | ||
216 | |||
217 | -1, /* 0000 -> RESERVED (9.0x) */ | ||
218 | 110, /* 0001 -> 11.0x */ | ||
219 | 120, /* 0010 -> 12.0x */ | ||
220 | -1, /* 0011 -> RESERVED (10.0x)*/ | ||
221 | 135, /* 0100 -> 13.5x */ | ||
222 | 115, /* 0101 -> 11.5x */ | ||
223 | 125, /* 0110 -> 12.5x */ | ||
224 | 105, /* 0111 -> 10.5x */ | ||
225 | 130, /* 1000 -> 13.0x */ | ||
226 | 150, /* 1001 -> 15.0x */ | ||
227 | 160, /* 1010 -> 16.0x */ | ||
228 | 140, /* 1011 -> 14.0x */ | ||
229 | -1, /* 1100 -> RESERVED (12.0x) */ | ||
230 | 155, /* 1101 -> 15.5x */ | ||
231 | -1, /* 1110 -> RESERVED (13.0x) */ | ||
232 | 145, /* 1111 -> 14.5x */ | ||
233 | }; | ||
234 | |||
235 | /* | ||
236 | * VIA C3 Nehemiah | ||
237 | */ | ||
238 | static const int __initdata nehemiah_clock_ratio[32] = { | ||
239 | 100, /* 0000 -> 10.0x */ | ||
240 | -1, /* 0001 -> 16.0x */ | ||
241 | 40, /* 0010 -> 4.0x */ | ||
242 | 90, /* 0011 -> 9.0x */ | ||
243 | 95, /* 0100 -> 9.5x */ | ||
244 | -1, /* 0101 -> RESERVED */ | ||
245 | 45, /* 0110 -> 4.5x */ | ||
246 | 55, /* 0111 -> 5.5x */ | ||
247 | 60, /* 1000 -> 6.0x */ | ||
248 | 70, /* 1001 -> 7.0x */ | ||
249 | 80, /* 1010 -> 8.0x */ | ||
250 | 50, /* 1011 -> 5.0x */ | ||
251 | 65, /* 1100 -> 6.5x */ | ||
252 | 75, /* 1101 -> 7.5x */ | ||
253 | 85, /* 1110 -> 8.5x */ | ||
254 | 120, /* 1111 -> 12.0x */ | ||
255 | -1, /* 0000 -> 10.0x */ | ||
256 | 110, /* 0001 -> 11.0x */ | ||
257 | -1, /* 0010 -> 12.0x */ | ||
258 | -1, /* 0011 -> 9.0x */ | ||
259 | 105, /* 0100 -> 10.5x */ | ||
260 | 115, /* 0101 -> 11.5x */ | ||
261 | 125, /* 0110 -> 12.5x */ | ||
262 | 135, /* 0111 -> 13.5x */ | ||
263 | 140, /* 1000 -> 14.0x */ | ||
264 | 150, /* 1001 -> 15.0x */ | ||
265 | 160, /* 1010 -> 16.0x */ | ||
266 | 130, /* 1011 -> 13.0x */ | ||
267 | 145, /* 1100 -> 14.5x */ | ||
268 | 155, /* 1101 -> 15.5x */ | ||
269 | -1, /* 1110 -> RESERVED (13.0x) */ | ||
270 | -1, /* 1111 -> 12.0x */ | ||
271 | }; | ||
272 | |||
273 | static const int __initdata nehemiah_eblcr[32] = { | ||
274 | 50, /* 0000 -> 5.0x */ | ||
275 | 160, /* 0001 -> 16.0x */ | ||
276 | 40, /* 0010 -> 4.0x */ | ||
277 | 100, /* 0011 -> 10.0x */ | ||
278 | 55, /* 0100 -> 5.5x */ | ||
279 | -1, /* 0101 -> RESERVED */ | ||
280 | 45, /* 0110 -> 4.5x */ | ||
281 | 95, /* 0111 -> 9.5x */ | ||
282 | 90, /* 1000 -> 9.0x */ | ||
283 | 70, /* 1001 -> 7.0x */ | ||
284 | 80, /* 1010 -> 8.0x */ | ||
285 | 60, /* 1011 -> 6.0x */ | ||
286 | 120, /* 1100 -> 12.0x */ | ||
287 | 75, /* 1101 -> 7.5x */ | ||
288 | 85, /* 1110 -> 8.5x */ | ||
289 | 65, /* 1111 -> 6.5x */ | ||
290 | 90, /* 0000 -> 9.0x */ | ||
291 | 110, /* 0001 -> 11.0x */ | ||
292 | 120, /* 0010 -> 12.0x */ | ||
293 | 100, /* 0011 -> 10.0x */ | ||
294 | 135, /* 0100 -> 13.5x */ | ||
295 | 115, /* 0101 -> 11.5x */ | ||
296 | 125, /* 0110 -> 12.5x */ | ||
297 | 105, /* 0111 -> 10.5x */ | ||
298 | 130, /* 1000 -> 13.0x */ | ||
299 | 150, /* 1001 -> 15.0x */ | ||
300 | 160, /* 1010 -> 16.0x */ | ||
301 | 140, /* 1011 -> 14.0x */ | ||
302 | 120, /* 1100 -> 12.0x */ | ||
303 | 155, /* 1101 -> 15.5x */ | ||
304 | -1, /* 1110 -> RESERVED (13.0x) */ | ||
305 | 145 /* 1111 -> 14.5x */ | ||
306 | }; | ||
307 | |||
308 | /* | ||
309 | * Voltage scales. Div/Mod by 1000 to get actual voltage. | ||
310 | * Which scale to use depends on the VRM type in use. | ||
311 | */ | ||
312 | |||
313 | struct mV_pos { | ||
314 | unsigned short mV; | ||
315 | unsigned short pos; | ||
316 | }; | ||
317 | |||
318 | static const struct mV_pos __initdata vrm85_mV[32] = { | ||
319 | {1250, 8}, {1200, 6}, {1150, 4}, {1100, 2}, | ||
320 | {1050, 0}, {1800, 30}, {1750, 28}, {1700, 26}, | ||
321 | {1650, 24}, {1600, 22}, {1550, 20}, {1500, 18}, | ||
322 | {1450, 16}, {1400, 14}, {1350, 12}, {1300, 10}, | ||
323 | {1275, 9}, {1225, 7}, {1175, 5}, {1125, 3}, | ||
324 | {1075, 1}, {1825, 31}, {1775, 29}, {1725, 27}, | ||
325 | {1675, 25}, {1625, 23}, {1575, 21}, {1525, 19}, | ||
326 | {1475, 17}, {1425, 15}, {1375, 13}, {1325, 11} | ||
327 | }; | ||
328 | |||
329 | static const unsigned char __initdata mV_vrm85[32] = { | ||
330 | 0x04, 0x14, 0x03, 0x13, 0x02, 0x12, 0x01, 0x11, | ||
331 | 0x00, 0x10, 0x0f, 0x1f, 0x0e, 0x1e, 0x0d, 0x1d, | ||
332 | 0x0c, 0x1c, 0x0b, 0x1b, 0x0a, 0x1a, 0x09, 0x19, | ||
333 | 0x08, 0x18, 0x07, 0x17, 0x06, 0x16, 0x05, 0x15 | ||
334 | }; | ||
335 | |||
336 | static const struct mV_pos __initdata mobilevrm_mV[32] = { | ||
337 | {1750, 31}, {1700, 30}, {1650, 29}, {1600, 28}, | ||
338 | {1550, 27}, {1500, 26}, {1450, 25}, {1400, 24}, | ||
339 | {1350, 23}, {1300, 22}, {1250, 21}, {1200, 20}, | ||
340 | {1150, 19}, {1100, 18}, {1050, 17}, {1000, 16}, | ||
341 | {975, 15}, {950, 14}, {925, 13}, {900, 12}, | ||
342 | {875, 11}, {850, 10}, {825, 9}, {800, 8}, | ||
343 | {775, 7}, {750, 6}, {725, 5}, {700, 4}, | ||
344 | {675, 3}, {650, 2}, {625, 1}, {600, 0} | ||
345 | }; | ||
346 | |||
347 | static const unsigned char __initdata mV_mobilevrm[32] = { | ||
348 | 0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18, | ||
349 | 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10, | ||
350 | 0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08, | ||
351 | 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00 | ||
352 | }; | ||
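Reading the tables above back-to-back (a standalone sketch, with both constants copied from the VRM 8.5 tables): VID code 8 selects {1650, 24}, i.e. 1.650 V at position 24 on the monotonic scale, and mV_vrm85[24] = 0x08 maps that position back to the VID code:

#include <stdio.h>

/* Round-trip for one VRM 8.5 entry; the two constants are copied
 * from vrm85_mV[8] and mV_vrm85[24] above. */
struct mV_pos { unsigned short mV; unsigned short pos; };

int main(void)
{
	struct mV_pos vid8 = {1650, 24};        /* vrm85_mV[8] */
	unsigned char pos24_vid = 0x08;         /* mV_vrm85[24] */

	printf("VID 8 -> %d.%03d V at scale position %u\n",
	       vid8.mV / 1000, vid8.mV % 1000, (unsigned)vid8.pos);
	printf("scale position 24 -> VID code 0x%02x\n", (unsigned)pos24_vid);
	return 0;
}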
353 | |||
diff --git a/arch/x86/kernel/cpu/cpufreq/longrun.c b/arch/x86/kernel/cpu/cpufreq/longrun.c new file mode 100644 index 000000000000..b2689514295a --- /dev/null +++ b/arch/x86/kernel/cpu/cpufreq/longrun.c | |||
@@ -0,0 +1,325 @@ | |||
1 | /* | ||
2 | * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> | ||
3 | * | ||
4 | * Licensed under the terms of the GNU GPL License version 2. | ||
5 | * | ||
6 | * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* | ||
7 | */ | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/slab.h> | ||
13 | #include <linux/cpufreq.h> | ||
14 | |||
15 | #include <asm/msr.h> | ||
16 | #include <asm/processor.h> | ||
17 | #include <asm/timex.h> | ||
18 | |||
19 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "longrun", msg) | ||
20 | |||
21 | static struct cpufreq_driver longrun_driver; | ||
22 | |||
23 | /** | ||
24 | * longrun_{low,high}_freq is needed for the conversion of cpufreq kHz | ||
25 | * values into per cent values. In TMTA microcode, the following is valid: | ||
26 | * performance_pctg = (current_freq - low_freq)/(high_freq - low_freq) | ||
27 | */ | ||
28 | static unsigned int longrun_low_freq, longrun_high_freq; | ||
29 | |||
30 | |||
31 | /** | ||
32 | * longrun_get_policy - get the current LongRun policy | ||
33 | * @policy: struct cpufreq_policy where current policy is written into | ||
34 | * | ||
35 | * Reads the current LongRun policy by access to MSR_TMTA_LONGRUN_FLAGS | ||
36 | * and MSR_TMTA_LONGRUN_CTRL | ||
37 | */ | ||
38 | static void __init longrun_get_policy(struct cpufreq_policy *policy) | ||
39 | { | ||
40 | u32 msr_lo, msr_hi; | ||
41 | |||
42 | rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi); | ||
43 | dprintk("longrun flags are %x - %x\n", msr_lo, msr_hi); | ||
44 | if (msr_lo & 0x01) | ||
45 | policy->policy = CPUFREQ_POLICY_PERFORMANCE; | ||
46 | else | ||
47 | policy->policy = CPUFREQ_POLICY_POWERSAVE; | ||
48 | |||
49 | rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); | ||
50 | dprintk("longrun ctrl is %x - %x\n", msr_lo, msr_hi); | ||
51 | msr_lo &= 0x0000007F; | ||
52 | msr_hi &= 0x0000007F; | ||
53 | |||
54 | if ( longrun_high_freq <= longrun_low_freq ) { | ||
55 | /* Assume degenerate Longrun table */ | ||
56 | policy->min = policy->max = longrun_high_freq; | ||
57 | } else { | ||
58 | policy->min = longrun_low_freq + msr_lo * | ||
59 | ((longrun_high_freq - longrun_low_freq) / 100); | ||
60 | policy->max = longrun_low_freq + msr_hi * | ||
61 | ((longrun_high_freq - longrun_low_freq) / 100); | ||
62 | } | ||
63 | policy->cpu = 0; | ||
64 | } | ||
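A worked instance of the percent-to-kHz conversion used above, with made-up frequencies (300000 and 1000000 kHz) and MSR bounds of 20% and 80%:

#include <stdio.h>

/* Sketch of the LongRun percent <-> kHz conversion; all inputs are hypothetical. */
int main(void)
{
	unsigned int low_freq  =  300000;       /* kHz */
	unsigned int high_freq = 1000000;       /* kHz */
	unsigned int pct_lo = 20, pct_hi = 80;  /* lower/upper bound, percent */

	unsigned int min = low_freq + pct_lo * ((high_freq - low_freq) / 100);
	unsigned int max = low_freq + pct_hi * ((high_freq - low_freq) / 100);

	printf("policy range: %u kHz .. %u kHz\n", min, max);   /* 440000 .. 860000 */
	return 0;
}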
65 | |||
66 | |||
67 | /** | ||
68 | * longrun_set_policy - sets a new CPUFreq policy | ||
69 | * @policy: new policy | ||
70 | * | ||
71 | * Sets a new CPUFreq policy on LongRun-capable processors. This function | ||
72 | * has to be called with cpufreq_driver locked. | ||
73 | */ | ||
74 | static int longrun_set_policy(struct cpufreq_policy *policy) | ||
75 | { | ||
76 | u32 msr_lo, msr_hi; | ||
77 | u32 pctg_lo, pctg_hi; | ||
78 | |||
79 | if (!policy) | ||
80 | return -EINVAL; | ||
81 | |||
82 | if ( longrun_high_freq <= longrun_low_freq ) { | ||
83 | /* Assume degenerate Longrun table */ | ||
84 | pctg_lo = pctg_hi = 100; | ||
85 | } else { | ||
86 | pctg_lo = (policy->min - longrun_low_freq) / | ||
87 | ((longrun_high_freq - longrun_low_freq) / 100); | ||
88 | pctg_hi = (policy->max - longrun_low_freq) / | ||
89 | ((longrun_high_freq - longrun_low_freq) / 100); | ||
90 | } | ||
91 | |||
92 | if (pctg_hi > 100) | ||
93 | pctg_hi = 100; | ||
94 | if (pctg_lo > pctg_hi) | ||
95 | pctg_lo = pctg_hi; | ||
96 | |||
97 | /* performance or economy mode */ | ||
98 | rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi); | ||
99 | msr_lo &= 0xFFFFFFFE; | ||
100 | switch (policy->policy) { | ||
101 | case CPUFREQ_POLICY_PERFORMANCE: | ||
102 | msr_lo |= 0x00000001; | ||
103 | break; | ||
104 | case CPUFREQ_POLICY_POWERSAVE: | ||
105 | break; | ||
106 | } | ||
107 | wrmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi); | ||
108 | |||
109 | /* lower and upper boundary */ | ||
110 | rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); | ||
111 | msr_lo &= 0xFFFFFF80; | ||
112 | msr_hi &= 0xFFFFFF80; | ||
113 | msr_lo |= pctg_lo; | ||
114 | msr_hi |= pctg_hi; | ||
115 | wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); | ||
116 | |||
117 | return 0; | ||
118 | } | ||
119 | |||
120 | |||
121 | /** | ||
122 | * longrun_verify_policy - verifies a new CPUFreq policy | ||
123 | * @policy: the policy to verify | ||
124 | * | ||
125 | * Validates a new CPUFreq policy. This function has to be called with | ||
126 | * cpufreq_driver locked. | ||
127 | */ | ||
128 | static int longrun_verify_policy(struct cpufreq_policy *policy) | ||
129 | { | ||
130 | if (!policy) | ||
131 | return -EINVAL; | ||
132 | |||
133 | policy->cpu = 0; | ||
134 | cpufreq_verify_within_limits(policy, | ||
135 | policy->cpuinfo.min_freq, | ||
136 | policy->cpuinfo.max_freq); | ||
137 | |||
138 | if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) && | ||
139 | (policy->policy != CPUFREQ_POLICY_PERFORMANCE)) | ||
140 | return -EINVAL; | ||
141 | |||
142 | return 0; | ||
143 | } | ||
144 | |||
145 | static unsigned int longrun_get(unsigned int cpu) | ||
146 | { | ||
147 | u32 eax, ebx, ecx, edx; | ||
148 | |||
149 | if (cpu) | ||
150 | return 0; | ||
151 | |||
152 | cpuid(0x80860007, &eax, &ebx, &ecx, &edx); | ||
153 | dprintk("cpuid eax is %u\n", eax); | ||
154 | |||
155 | return (eax * 1000); | ||
156 | } | ||
157 | |||
158 | /** | ||
159 | * longrun_determine_freqs - determines the lowest and highest possible core frequencies | ||
160 | * @low_freq: an int to put the lowest frequency into | ||
161 | * @high_freq: an int to put the highest frequency into | ||
162 | * | ||
163 | * Determines the lowest and highest possible core frequencies on this CPU. | ||
164 | * This is necessary to calculate the performance percentage according to | ||
165 | * TMTA rules: | ||
166 | * performance_pctg = (target_freq - low_freq)/(high_freq - low_freq) | ||
167 | */ | ||
168 | static unsigned int __init longrun_determine_freqs(unsigned int *low_freq, | ||
169 | unsigned int *high_freq) | ||
170 | { | ||
171 | u32 msr_lo, msr_hi; | ||
172 | u32 save_lo, save_hi; | ||
173 | u32 eax, ebx, ecx, edx; | ||
174 | u32 try_hi; | ||
175 | struct cpuinfo_x86 *c = cpu_data; | ||
176 | |||
177 | if (!low_freq || !high_freq) | ||
178 | return -EINVAL; | ||
179 | |||
180 | if (cpu_has(c, X86_FEATURE_LRTI)) { | ||
181 | /* if the LongRun Table Interface is present, the | ||
182 | * detection is a bit easier: | ||
183 | * For minimum frequency, read out the maximum | ||
184 | * level (msr_hi), write that into "currently | ||
185 | * selected level", and read out the frequency. | ||
186 | * For maximum frequency, read out level zero. | ||
187 | */ | ||
188 | /* minimum */ | ||
189 | rdmsr(MSR_TMTA_LRTI_READOUT, msr_lo, msr_hi); | ||
190 | wrmsr(MSR_TMTA_LRTI_READOUT, msr_hi, msr_hi); | ||
191 | rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi); | ||
192 | *low_freq = msr_lo * 1000; /* to kHz */ | ||
193 | |||
194 | /* maximum */ | ||
195 | wrmsr(MSR_TMTA_LRTI_READOUT, 0, msr_hi); | ||
196 | rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi); | ||
197 | *high_freq = msr_lo * 1000; /* to kHz */ | ||
198 | |||
199 | dprintk("longrun table interface told %u - %u kHz\n", *low_freq, *high_freq); | ||
200 | |||
201 | if (*low_freq > *high_freq) | ||
202 | *low_freq = *high_freq; | ||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | /* set the upper border to the value determined during TSC init */ | ||
207 | *high_freq = (cpu_khz / 1000); | ||
208 | *high_freq = *high_freq * 1000; | ||
209 | dprintk("high frequency is %u kHz\n", *high_freq); | ||
210 | |||
211 | /* get current borders */ | ||
212 | rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); | ||
213 | save_lo = msr_lo & 0x0000007F; | ||
214 | save_hi = msr_hi & 0x0000007F; | ||
215 | |||
216 | /* if current perf_pctg is larger than 90%, we need to decrease the | ||
217 | * upper limit to make the calculation more accurate. | ||
218 | */ | ||
219 | cpuid(0x80860007, &eax, &ebx, &ecx, &edx); | ||
220 | /* try decreasing in 10% steps; some processors react only | ||
221 | * at certain boundary values */ | ||
222 | for (try_hi = 80; try_hi > 0 && ecx > 90; try_hi -=10) { | ||
223 | /* set to 0 to try_hi perf_pctg */ | ||
224 | msr_lo &= 0xFFFFFF80; | ||
225 | msr_hi &= 0xFFFFFF80; | ||
226 | msr_hi |= try_hi; | ||
227 | wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); | ||
228 | |||
229 | /* read out current core MHz and current perf_pctg */ | ||
230 | cpuid(0x80860007, &eax, &ebx, &ecx, &edx); | ||
231 | |||
232 | /* restore values */ | ||
233 | wrmsr(MSR_TMTA_LONGRUN_CTRL, save_lo, save_hi); | ||
234 | } | ||
235 | dprintk("percentage is %u %%, freq is %u MHz\n", ecx, eax); | ||
236 | |||
237 | /* performance_pctg = (current_freq - low_freq)/(high_freq - low_freq) | ||
238 | * equals | ||
239 | * low_freq * ( 1 - perf_pctg) = (cur_freq - high_freq * perf_pctg) | ||
240 | * | ||
241 | * high_freq * perf_pctg is stored temporarily in "ebx". | ||
242 | */ | ||
243 | ebx = (((cpu_khz / 1000) * ecx) / 100); /* to MHz */ | ||
244 | |||
245 | if ((ecx > 95) || (ecx == 0) || (eax < ebx)) | ||
246 | return -EIO; | ||
247 | |||
248 | edx = ((eax - ebx) * 100) / (100 - ecx); | ||
249 | *low_freq = edx * 1000; /* back to kHz */ | ||
250 | |||
251 | dprintk("low frequency is %u kHz\n", *low_freq); | ||
252 | |||
253 | if (*low_freq > *high_freq) | ||
254 | *low_freq = *high_freq; | ||
255 | |||
256 | return 0; | ||
257 | } | ||
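The low-frequency derivation above can be checked with hypothetical readings: at a reported 700 MHz and 40% performance on a part whose upper border is 1000 MHz, high_freq * perf_pctg is 400 MHz, so low_freq = (700 - 400) * 100 / (100 - 40) = 500 MHz, i.e. 500000 kHz:

#include <stdio.h>

/* Worked example of the low_freq derivation; the CPUID-style inputs are made up. */
int main(void)
{
	unsigned int eax = 700;         /* current core clock, MHz */
	unsigned int ecx = 40;          /* current performance percentage */
	unsigned int high_mhz = 1000;   /* upper border, MHz */

	unsigned int ebx = (high_mhz * ecx) / 100;               /* high_freq * perf_pctg */
	unsigned int low_mhz = ((eax - ebx) * 100) / (100 - ecx);

	printf("low frequency is %u kHz\n", low_mhz * 1000);     /* 500000 */
	return 0;
}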
258 | |||
259 | |||
260 | static int __init longrun_cpu_init(struct cpufreq_policy *policy) | ||
261 | { | ||
262 | int result = 0; | ||
263 | |||
264 | /* capability check */ | ||
265 | if (policy->cpu != 0) | ||
266 | return -ENODEV; | ||
267 | |||
268 | /* detect low and high frequency */ | ||
269 | result = longrun_determine_freqs(&longrun_low_freq, &longrun_high_freq); | ||
270 | if (result) | ||
271 | return result; | ||
272 | |||
273 | /* cpuinfo and default policy values */ | ||
274 | policy->cpuinfo.min_freq = longrun_low_freq; | ||
275 | policy->cpuinfo.max_freq = longrun_high_freq; | ||
276 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | ||
277 | longrun_get_policy(policy); | ||
278 | |||
279 | return 0; | ||
280 | } | ||
281 | |||
282 | |||
283 | static struct cpufreq_driver longrun_driver = { | ||
284 | .flags = CPUFREQ_CONST_LOOPS, | ||
285 | .verify = longrun_verify_policy, | ||
286 | .setpolicy = longrun_set_policy, | ||
287 | .get = longrun_get, | ||
288 | .init = longrun_cpu_init, | ||
289 | .name = "longrun", | ||
290 | .owner = THIS_MODULE, | ||
291 | }; | ||
292 | |||
293 | |||
294 | /** | ||
295 | * longrun_init - initializes the Transmeta Crusoe LongRun CPUFreq driver | ||
296 | * | ||
297 | * Initializes the LongRun support. | ||
298 | */ | ||
299 | static int __init longrun_init(void) | ||
300 | { | ||
301 | struct cpuinfo_x86 *c = cpu_data; | ||
302 | |||
303 | if (c->x86_vendor != X86_VENDOR_TRANSMETA || | ||
304 | !cpu_has(c, X86_FEATURE_LONGRUN)) | ||
305 | return -ENODEV; | ||
306 | |||
307 | return cpufreq_register_driver(&longrun_driver); | ||
308 | } | ||
309 | |||
310 | |||
311 | /** | ||
312 | * longrun_exit - unregisters LongRun support | ||
313 | */ | ||
314 | static void __exit longrun_exit(void) | ||
315 | { | ||
316 | cpufreq_unregister_driver(&longrun_driver); | ||
317 | } | ||
318 | |||
319 | |||
320 | MODULE_AUTHOR ("Dominik Brodowski <linux@brodo.de>"); | ||
321 | MODULE_DESCRIPTION ("LongRun driver for Transmeta Crusoe and Efficeon processors."); | ||
322 | MODULE_LICENSE ("GPL"); | ||
323 | |||
324 | module_init(longrun_init); | ||
325 | module_exit(longrun_exit); | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c new file mode 100644 index 000000000000..4c76b511e194 --- /dev/null +++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | |||
@@ -0,0 +1,316 @@ | |||
1 | /* | ||
2 | * Pentium 4/Xeon CPU on demand clock modulation/speed scaling | ||
3 | * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> | ||
4 | * (C) 2002 Zwane Mwaikambo <zwane@commfireservices.com> | ||
5 | * (C) 2002 Arjan van de Ven <arjanv@redhat.com> | ||
6 | * (C) 2002 Tora T. Engstad | ||
7 | * All Rights Reserved | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License | ||
11 | * as published by the Free Software Foundation; either version | ||
12 | * 2 of the License, or (at your option) any later version. | ||
13 | * | ||
14 | * The author(s) of this software shall not be held liable for damages | ||
15 | * of any nature resulting due to the use of this software. This | ||
16 | * software is provided AS-IS with no warranties. | ||
17 | * | ||
18 | * Date Errata Description | ||
19 | * 20020525 N44, O17 12.5% or 25% DC causes lockup | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/smp.h> | ||
27 | #include <linux/cpufreq.h> | ||
28 | #include <linux/slab.h> | ||
29 | #include <linux/cpumask.h> | ||
30 | |||
31 | #include <asm/processor.h> | ||
32 | #include <asm/msr.h> | ||
33 | #include <asm/timex.h> | ||
34 | |||
35 | #include "speedstep-lib.h" | ||
36 | |||
37 | #define PFX "p4-clockmod: " | ||
38 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "p4-clockmod", msg) | ||
39 | |||
40 | /* | ||
41 | * Duty Cycle (3 bits); note DC_DISABLE is not specified in | ||
42 | * Intel docs, it is just used here to mean disable | ||
43 | */ | ||
44 | enum { | ||
45 | DC_RESV, DC_DFLT, DC_25PT, DC_38PT, DC_50PT, | ||
46 | DC_64PT, DC_75PT, DC_88PT, DC_DISABLE | ||
47 | }; | ||
48 | |||
49 | #define DC_ENTRIES 8 | ||
50 | |||
51 | |||
52 | static int has_N44_O17_errata[NR_CPUS]; | ||
53 | static unsigned int stock_freq; | ||
54 | static struct cpufreq_driver p4clockmod_driver; | ||
55 | static unsigned int cpufreq_p4_get(unsigned int cpu); | ||
56 | |||
57 | static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate) | ||
58 | { | ||
59 | u32 l, h; | ||
60 | |||
61 | if (!cpu_online(cpu) || (newstate > DC_DISABLE) || (newstate == DC_RESV)) | ||
62 | return -EINVAL; | ||
63 | |||
64 | rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h); | ||
65 | |||
66 | if (l & 0x01) | ||
67 | dprintk("CPU#%d currently thermal throttled\n", cpu); | ||
68 | |||
69 | if (has_N44_O17_errata[cpu] && (newstate == DC_25PT || newstate == DC_DFLT)) | ||
70 | newstate = DC_38PT; | ||
71 | |||
72 | rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h); | ||
73 | if (newstate == DC_DISABLE) { | ||
74 | dprintk("CPU#%d disabling modulation\n", cpu); | ||
75 | wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h); | ||
76 | } else { | ||
77 | dprintk("CPU#%d setting duty cycle to %d%%\n", | ||
78 | cpu, ((125 * newstate) / 10)); | ||
79 | /* bits 63 - 5 : reserved | ||
80 | * bit 4 : enable/disable | ||
81 | * bits 3-1 : duty cycle | ||
82 | * bit 0 : reserved | ||
83 | */ | ||
84 | l = (l & ~14); | ||
85 | l = l | (1<<4) | ((newstate & 0x7)<<1); | ||
86 | wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l, h); | ||
87 | } | ||
88 | |||
89 | return 0; | ||
90 | } | ||
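For reference, a standalone sketch of the duty-cycle encoding handled above. The state number follows the DC_* enum (DC_50PT = 4 means a 4/8 = 50% duty cycle); the 2400000 kHz stock frequency is a hypothetical example:

#include <stdio.h>

/* Effective frequency and THERM_CONTROL low word for one duty-cycle state. */
int main(void)
{
	unsigned int stock_freq = 2400000;      /* kHz, hypothetical */
	unsigned int state = 4;                 /* DC_50PT */

	unsigned int msr_low = (1u << 4) | ((state & 0x7) << 1); /* enable bit + duty bits 3-1 */
	unsigned int freq = stock_freq * state / 8;

	printf("state %u -> %u kHz, THERM_CONTROL low word 0x%02x\n",
	       state, freq, msr_low);           /* 1200000 kHz, 0x18 */
	return 0;
}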
91 | |||
92 | |||
93 | static struct cpufreq_frequency_table p4clockmod_table[] = { | ||
94 | {DC_RESV, CPUFREQ_ENTRY_INVALID}, | ||
95 | {DC_DFLT, 0}, | ||
96 | {DC_25PT, 0}, | ||
97 | {DC_38PT, 0}, | ||
98 | {DC_50PT, 0}, | ||
99 | {DC_64PT, 0}, | ||
100 | {DC_75PT, 0}, | ||
101 | {DC_88PT, 0}, | ||
102 | {DC_DISABLE, 0}, | ||
103 | {DC_RESV, CPUFREQ_TABLE_END}, | ||
104 | }; | ||
105 | |||
106 | |||
107 | static int cpufreq_p4_target(struct cpufreq_policy *policy, | ||
108 | unsigned int target_freq, | ||
109 | unsigned int relation) | ||
110 | { | ||
111 | unsigned int newstate = DC_RESV; | ||
112 | struct cpufreq_freqs freqs; | ||
113 | int i; | ||
114 | |||
115 | if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0], target_freq, relation, &newstate)) | ||
116 | return -EINVAL; | ||
117 | |||
118 | freqs.old = cpufreq_p4_get(policy->cpu); | ||
119 | freqs.new = stock_freq * p4clockmod_table[newstate].index / 8; | ||
120 | |||
121 | if (freqs.new == freqs.old) | ||
122 | return 0; | ||
123 | |||
124 | /* notifiers */ | ||
125 | for_each_cpu_mask(i, policy->cpus) { | ||
126 | freqs.cpu = i; | ||
127 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
128 | } | ||
129 | |||
130 | /* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software | ||
131 | * Developer's Manual, Volume 3 | ||
132 | */ | ||
133 | for_each_cpu_mask(i, policy->cpus) | ||
134 | cpufreq_p4_setdc(i, p4clockmod_table[newstate].index); | ||
135 | |||
136 | /* notifiers */ | ||
137 | for_each_cpu_mask(i, policy->cpus) { | ||
138 | freqs.cpu = i; | ||
139 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
140 | } | ||
141 | |||
142 | return 0; | ||
143 | } | ||
144 | |||
145 | |||
146 | static int cpufreq_p4_verify(struct cpufreq_policy *policy) | ||
147 | { | ||
148 | return cpufreq_frequency_table_verify(policy, &p4clockmod_table[0]); | ||
149 | } | ||
150 | |||
151 | |||
152 | static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) | ||
153 | { | ||
154 | if (c->x86 == 0x06) { | ||
155 | if (cpu_has(c, X86_FEATURE_EST)) | ||
156 | printk(KERN_WARNING PFX "Warning: EST-capable CPU detected. " | ||
157 | "The acpi-cpufreq module offers voltage scaling" | ||
158 | " in addition of frequency scaling. You should use " | ||
159 | "that instead of p4-clockmod, if possible.\n"); | ||
160 | switch (c->x86_model) { | ||
161 | case 0x0E: /* Core */ | ||
162 | case 0x0F: /* Core Duo */ | ||
163 | p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS; | ||
164 | return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PCORE); | ||
165 | case 0x0D: /* Pentium M (Dothan) */ | ||
166 | p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS; | ||
167 | /* fall through */ | ||
168 | case 0x09: /* Pentium M (Banias) */ | ||
169 | return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PM); | ||
170 | } | ||
171 | } | ||
172 | |||
173 | if (c->x86 != 0xF) { | ||
174 | printk(KERN_WARNING PFX "Unknown p4-clockmod-capable CPU. Please send an e-mail to <cpufreq@lists.linux.org.uk>\n"); | ||
175 | return 0; | ||
176 | } | ||
177 | |||
178 | /* on P-4s, the TSC runs with constant frequency independent of whether | ||
179 | * throttling is active or not. */ | ||
180 | p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS; | ||
181 | |||
182 | if (speedstep_detect_processor() == SPEEDSTEP_PROCESSOR_P4M) { | ||
183 | printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. " | ||
184 | "The speedstep-ich or acpi cpufreq modules offer " | ||
185 | "voltage scaling in addition of frequency scaling. " | ||
186 | "You should use either one instead of p4-clockmod, " | ||
187 | "if possible.\n"); | ||
188 | return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_P4M); | ||
189 | } | ||
190 | |||
191 | return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_P4D); | ||
192 | } | ||
193 | |||
194 | |||
195 | |||
196 | static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) | ||
197 | { | ||
198 | struct cpuinfo_x86 *c = &cpu_data[policy->cpu]; | ||
199 | int cpuid = 0; | ||
200 | unsigned int i; | ||
201 | |||
202 | #ifdef CONFIG_SMP | ||
203 | policy->cpus = cpu_sibling_map[policy->cpu]; | ||
204 | #endif | ||
205 | |||
206 | /* Errata workaround */ | ||
207 | cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask; | ||
208 | switch (cpuid) { | ||
209 | case 0x0f07: | ||
210 | case 0x0f0a: | ||
211 | case 0x0f11: | ||
212 | case 0x0f12: | ||
213 | has_N44_O17_errata[policy->cpu] = 1; | ||
214 | dprintk("has errata -- disabling low frequencies\n"); | ||
215 | } | ||
216 | |||
217 | /* get max frequency */ | ||
218 | stock_freq = cpufreq_p4_get_frequency(c); | ||
219 | if (!stock_freq) | ||
220 | return -EINVAL; | ||
221 | |||
222 | /* table init */ | ||
223 | for (i=1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) { | ||
224 | if ((i<2) && (has_N44_O17_errata[policy->cpu])) | ||
225 | p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID; | ||
226 | else | ||
227 | p4clockmod_table[i].frequency = (stock_freq * i)/8; | ||
228 | } | ||
229 | cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu); | ||
230 | |||
231 | /* cpuinfo and default policy values */ | ||
232 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
233 | policy->cpuinfo.transition_latency = 1000000; /* assumed */ | ||
234 | policy->cur = stock_freq; | ||
235 | |||
236 | return cpufreq_frequency_table_cpuinfo(policy, &p4clockmod_table[0]); | ||
237 | } | ||
238 | |||
239 | |||
240 | static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy) | ||
241 | { | ||
242 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
243 | return 0; | ||
244 | } | ||
245 | |||
246 | static unsigned int cpufreq_p4_get(unsigned int cpu) | ||
247 | { | ||
248 | u32 l, h; | ||
249 | |||
250 | rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h); | ||
251 | |||
252 | if (l & 0x10) { | ||
253 | l = l >> 1; | ||
254 | l &= 0x7; | ||
255 | } else | ||
256 | l = DC_DISABLE; | ||
257 | |||
258 | if (l != DC_DISABLE) | ||
259 | return (stock_freq * l / 8); | ||
260 | |||
261 | return stock_freq; | ||
262 | } | ||
263 | |||
264 | static struct freq_attr* p4clockmod_attr[] = { | ||
265 | &cpufreq_freq_attr_scaling_available_freqs, | ||
266 | NULL, | ||
267 | }; | ||
268 | |||
269 | static struct cpufreq_driver p4clockmod_driver = { | ||
270 | .verify = cpufreq_p4_verify, | ||
271 | .target = cpufreq_p4_target, | ||
272 | .init = cpufreq_p4_cpu_init, | ||
273 | .exit = cpufreq_p4_cpu_exit, | ||
274 | .get = cpufreq_p4_get, | ||
275 | .name = "p4-clockmod", | ||
276 | .owner = THIS_MODULE, | ||
277 | .attr = p4clockmod_attr, | ||
278 | }; | ||
279 | |||
280 | |||
281 | static int __init cpufreq_p4_init(void) | ||
282 | { | ||
283 | struct cpuinfo_x86 *c = cpu_data; | ||
284 | int ret; | ||
285 | |||
286 | /* | ||
287 | * THERM_CONTROL is architectural for IA32 now, so | ||
288 | * we can rely on the capability checks | ||
289 | */ | ||
290 | if (c->x86_vendor != X86_VENDOR_INTEL) | ||
291 | return -ENODEV; | ||
292 | |||
293 | if (!test_bit(X86_FEATURE_ACPI, c->x86_capability) || | ||
294 | !test_bit(X86_FEATURE_ACC, c->x86_capability)) | ||
295 | return -ENODEV; | ||
296 | |||
297 | ret = cpufreq_register_driver(&p4clockmod_driver); | ||
298 | if (!ret) | ||
299 | printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock Modulation available\n"); | ||
300 | |||
301 | return (ret); | ||
302 | } | ||
303 | |||
304 | |||
305 | static void __exit cpufreq_p4_exit(void) | ||
306 | { | ||
307 | cpufreq_unregister_driver(&p4clockmod_driver); | ||
308 | } | ||
309 | |||
310 | |||
311 | MODULE_AUTHOR ("Zwane Mwaikambo <zwane@commfireservices.com>"); | ||
312 | MODULE_DESCRIPTION ("cpufreq driver for Pentium(TM) 4/Xeon(TM)"); | ||
313 | MODULE_LICENSE ("GPL"); | ||
314 | |||
315 | late_initcall(cpufreq_p4_init); | ||
316 | module_exit(cpufreq_p4_exit); | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c new file mode 100644 index 000000000000..f89524051e4a --- /dev/null +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c | |||
@@ -0,0 +1,256 @@ | |||
1 | /* | ||
2 | * This file was based upon code in Powertweak Linux (http://powertweak.sf.net) | ||
3 | * (C) 2000-2003 Dave Jones, Arjan van de Ven, Janne Pänkälä, Dominik Brodowski. | ||
4 | * | ||
5 | * Licensed under the terms of the GNU GPL License version 2. | ||
6 | * | ||
7 | * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/cpufreq.h> | ||
14 | #include <linux/ioport.h> | ||
15 | #include <linux/slab.h> | ||
16 | |||
17 | #include <asm/msr.h> | ||
18 | #include <asm/timex.h> | ||
19 | #include <asm/io.h> | ||
20 | |||
21 | |||
22 | #define POWERNOW_IOPORT 0xfff0 /* it doesn't matter where, as long | ||
23 | as it is unused */ | ||
24 | |||
25 | static unsigned int busfreq; /* FSB, in 10 kHz */ | ||
26 | static unsigned int max_multiplier; | ||
27 | |||
28 | |||
29 | /* Clock ratio multiplied by 10 - see table 27 in AMD#23446 */ | ||
30 | static struct cpufreq_frequency_table clock_ratio[] = { | ||
31 | {45, /* 000 -> 4.5x */ 0}, | ||
32 | {50, /* 001 -> 5.0x */ 0}, | ||
33 | {40, /* 010 -> 4.0x */ 0}, | ||
34 | {55, /* 011 -> 5.5x */ 0}, | ||
35 | {20, /* 100 -> 2.0x */ 0}, | ||
36 | {30, /* 101 -> 3.0x */ 0}, | ||
37 | {60, /* 110 -> 6.0x */ 0}, | ||
38 | {35, /* 111 -> 3.5x */ 0}, | ||
39 | {0, CPUFREQ_TABLE_END} | ||
40 | }; | ||
41 | |||
42 | |||
43 | /** | ||
44 | * powernow_k6_get_cpu_multiplier - returns the current FSB multiplier | ||
45 | * | ||
46 | * Returns the current setting of the frequency multiplier. Core clock | ||
47 | * speed is the Front-Side Bus frequency multiplied by this value. | ||
48 | */ | ||
49 | static int powernow_k6_get_cpu_multiplier(void) | ||
50 | { | ||
51 | u64 invalue = 0; | ||
52 | u32 msrval; | ||
53 | |||
54 | msrval = POWERNOW_IOPORT + 0x1; | ||
55 | wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */ | ||
56 | invalue=inl(POWERNOW_IOPORT + 0x8); | ||
57 | msrval = POWERNOW_IOPORT + 0x0; | ||
58 | wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */ | ||
59 | |||
60 | return clock_ratio[(invalue >> 5)&7].index; | ||
61 | } | ||
62 | |||
63 | |||
64 | /** | ||
65 | * powernow_k6_set_state - set the PowerNow! multiplier | ||
66 | * @best_i: clock_ratio[best_i] is the target multiplier | ||
67 | * | ||
68 | * Tries to change the PowerNow! multiplier | ||
69 | */ | ||
70 | static void powernow_k6_set_state (unsigned int best_i) | ||
71 | { | ||
72 | unsigned long outvalue=0, invalue=0; | ||
73 | unsigned long msrval; | ||
74 | struct cpufreq_freqs freqs; | ||
75 | |||
76 | if (clock_ratio[best_i].index > max_multiplier) { | ||
77 | printk(KERN_ERR "cpufreq: invalid target frequency\n"); | ||
78 | return; | ||
79 | } | ||
80 | |||
81 | freqs.old = busfreq * powernow_k6_get_cpu_multiplier(); | ||
82 | freqs.new = busfreq * clock_ratio[best_i].index; | ||
83 | freqs.cpu = 0; /* powernow-k6.c is UP only driver */ | ||
84 | |||
85 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
86 | |||
87 | /* we now need to transform best_i to the BVC format, see AMD#23446 */ | ||
88 | |||
89 | outvalue = (1<<12) | (1<<10) | (1<<9) | (best_i<<5); | ||
90 | |||
91 | msrval = POWERNOW_IOPORT + 0x1; | ||
92 | wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */ | ||
93 | invalue=inl(POWERNOW_IOPORT + 0x8); | ||
94 | invalue = invalue & 0xf; | ||
95 | outvalue = outvalue | invalue; | ||
96 | outl(outvalue ,(POWERNOW_IOPORT + 0x8)); | ||
97 | msrval = POWERNOW_IOPORT + 0x0; | ||
98 | wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */ | ||
99 | |||
100 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
101 | |||
102 | return; | ||
103 | } | ||
104 | |||
105 | |||
106 | /** | ||
107 | * powernow_k6_verify - verifies a new CPUfreq policy | ||
108 | * @policy: new policy | ||
109 | * | ||
110 | * Policy must be within lowest and highest possible CPU Frequency, | ||
111 | * and at least one possible state must be within min and max. | ||
112 | */ | ||
113 | static int powernow_k6_verify(struct cpufreq_policy *policy) | ||
114 | { | ||
115 | return cpufreq_frequency_table_verify(policy, &clock_ratio[0]); | ||
116 | } | ||
117 | |||
118 | |||
119 | /** | ||
120 | * powernow_k6_target - set the PowerNow! multiplier for a target frequency | ||
121 | * @policy: new policy | ||
122 | * @target_freq: the target frequency | ||
123 | * @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) | ||
124 | * | ||
125 | * Sets a new frequency within the limits of the policy. | ||
126 | */ | ||
127 | static int powernow_k6_target (struct cpufreq_policy *policy, | ||
128 | unsigned int target_freq, | ||
129 | unsigned int relation) | ||
130 | { | ||
131 | unsigned int newstate = 0; | ||
132 | |||
133 | if (cpufreq_frequency_table_target(policy, &clock_ratio[0], target_freq, relation, &newstate)) | ||
134 | return -EINVAL; | ||
135 | |||
136 | powernow_k6_set_state(newstate); | ||
137 | |||
138 | return 0; | ||
139 | } | ||
140 | |||
141 | |||
142 | static int powernow_k6_cpu_init(struct cpufreq_policy *policy) | ||
143 | { | ||
144 | unsigned int i; | ||
145 | int result; | ||
146 | |||
147 | if (policy->cpu != 0) | ||
148 | return -ENODEV; | ||
149 | |||
150 | /* get frequencies */ | ||
151 | max_multiplier = powernow_k6_get_cpu_multiplier(); | ||
152 | busfreq = cpu_khz / max_multiplier; | ||
153 | |||
154 | /* table init */ | ||
155 | for (i=0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) { | ||
156 | if (clock_ratio[i].index > max_multiplier) | ||
157 | clock_ratio[i].frequency = CPUFREQ_ENTRY_INVALID; | ||
158 | else | ||
159 | clock_ratio[i].frequency = busfreq * clock_ratio[i].index; | ||
160 | } | ||
161 | |||
162 | /* cpuinfo and default policy values */ | ||
163 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
164 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | ||
165 | policy->cur = busfreq * max_multiplier; | ||
166 | |||
167 | result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio); | ||
168 | if (result) | ||
169 | return (result); | ||
170 | |||
171 | cpufreq_frequency_table_get_attr(clock_ratio, policy->cpu); | ||
172 | |||
173 | return 0; | ||
174 | } | ||
175 | |||
176 | |||
177 | static int powernow_k6_cpu_exit(struct cpufreq_policy *policy) | ||
178 | { | ||
179 | unsigned int i; | ||
180 | for (i=0; i<8; i++) { | ||
181 | if (clock_ratio[i].index == max_multiplier) | ||
182 | powernow_k6_set_state(i); | ||
183 | } | ||
184 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
185 | return 0; | ||
186 | } | ||
187 | |||
188 | static unsigned int powernow_k6_get(unsigned int cpu) | ||
189 | { | ||
190 | return busfreq * powernow_k6_get_cpu_multiplier(); | ||
191 | } | ||
192 | |||
193 | static struct freq_attr* powernow_k6_attr[] = { | ||
194 | &cpufreq_freq_attr_scaling_available_freqs, | ||
195 | NULL, | ||
196 | }; | ||
197 | |||
198 | static struct cpufreq_driver powernow_k6_driver = { | ||
199 | .verify = powernow_k6_verify, | ||
200 | .target = powernow_k6_target, | ||
201 | .init = powernow_k6_cpu_init, | ||
202 | .exit = powernow_k6_cpu_exit, | ||
203 | .get = powernow_k6_get, | ||
204 | .name = "powernow-k6", | ||
205 | .owner = THIS_MODULE, | ||
206 | .attr = powernow_k6_attr, | ||
207 | }; | ||
208 | |||
209 | |||
210 | /** | ||
211 | * powernow_k6_init - initializes the k6 PowerNow! CPUFreq driver | ||
212 | * | ||
213 | * Initializes the K6 PowerNow! support. Returns -ENODEV on unsupported | ||
214 | * devices, -EINVAL or -ENOMEM on problems during initialization, and zero | ||
215 | * on success. | ||
216 | */ | ||
217 | static int __init powernow_k6_init(void) | ||
218 | { | ||
219 | struct cpuinfo_x86 *c = cpu_data; | ||
220 | |||
221 | if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 != 5) || | ||
222 | ((c->x86_model != 12) && (c->x86_model != 13))) | ||
223 | return -ENODEV; | ||
224 | |||
225 | if (!request_region(POWERNOW_IOPORT, 16, "PowerNow!")) { | ||
226 | printk("cpufreq: PowerNow IOPORT region already used.\n"); | ||
227 | return -EIO; | ||
228 | } | ||
229 | |||
230 | if (cpufreq_register_driver(&powernow_k6_driver)) { | ||
231 | release_region (POWERNOW_IOPORT, 16); | ||
232 | return -EINVAL; | ||
233 | } | ||
234 | |||
235 | return 0; | ||
236 | } | ||
237 | |||
238 | |||
239 | /** | ||
240 | * powernow_k6_exit - unregisters AMD K6-2+/3+ PowerNow! support | ||
241 | * | ||
242 | * Unregisters AMD K6-2+ / K6-3+ PowerNow! support. | ||
243 | */ | ||
244 | static void __exit powernow_k6_exit(void) | ||
245 | { | ||
246 | cpufreq_unregister_driver(&powernow_k6_driver); | ||
247 | release_region (POWERNOW_IOPORT, 16); | ||
248 | } | ||
249 | |||
250 | |||
251 | MODULE_AUTHOR ("Arjan van de Ven <arjanv@redhat.com>, Dave Jones <davej@codemonkey.org.uk>, Dominik Brodowski <linux@brodo.de>"); | ||
252 | MODULE_DESCRIPTION ("PowerNow! driver for AMD K6-2+ / K6-3+ processors."); | ||
253 | MODULE_LICENSE ("GPL"); | ||
254 | |||
255 | module_init(powernow_k6_init); | ||
256 | module_exit(powernow_k6_exit); | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c new file mode 100644 index 000000000000..ca3e1d341889 --- /dev/null +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c | |||
@@ -0,0 +1,703 @@ | |||
1 | /* | ||
2 | * AMD K7 Powernow driver. | ||
3 | * (C) 2003 Dave Jones <davej@codemonkey.org.uk> on behalf of SuSE Labs. | ||
4 | * (C) 2003-2004 Dave Jones <davej@redhat.com> | ||
5 | * | ||
6 | * Licensed under the terms of the GNU GPL License version 2. | ||
7 | * Based upon datasheets & sample CPUs kindly provided by AMD. | ||
8 | * | ||
9 | * Errata 5: Processor may fail to execute a FID/VID change in presence of interrupt. | ||
10 | * - We cli/sti on stepping A0 CPUs around the FID/VID transition. | ||
11 | * Errata 15: Processors with half frequency multipliers may hang upon wakeup from disconnect. | ||
12 | * - We disable half multipliers if ACPI is used on A0 stepping CPUs. | ||
13 | */ | ||
14 | |||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/moduleparam.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/cpufreq.h> | ||
20 | #include <linux/slab.h> | ||
21 | #include <linux/string.h> | ||
22 | #include <linux/dmi.h> | ||
23 | |||
24 | #include <asm/msr.h> | ||
25 | #include <asm/timer.h> | ||
26 | #include <asm/timex.h> | ||
27 | #include <asm/io.h> | ||
28 | #include <asm/system.h> | ||
29 | |||
30 | #ifdef CONFIG_X86_POWERNOW_K7_ACPI | ||
31 | #include <linux/acpi.h> | ||
32 | #include <acpi/processor.h> | ||
33 | #endif | ||
34 | |||
35 | #include "powernow-k7.h" | ||
36 | |||
37 | #define PFX "powernow: " | ||
38 | |||
39 | |||
40 | struct psb_s { | ||
41 | u8 signature[10]; | ||
42 | u8 tableversion; | ||
43 | u8 flags; | ||
44 | u16 settlingtime; | ||
45 | u8 reserved1; | ||
46 | u8 numpst; | ||
47 | }; | ||
48 | |||
49 | struct pst_s { | ||
50 | u32 cpuid; | ||
51 | u8 fsbspeed; | ||
52 | u8 maxfid; | ||
53 | u8 startvid; | ||
54 | u8 numpstates; | ||
55 | }; | ||
56 | |||
57 | #ifdef CONFIG_X86_POWERNOW_K7_ACPI | ||
58 | union powernow_acpi_control_t { | ||
59 | struct { | ||
60 | unsigned long fid:5, | ||
61 | vid:5, | ||
62 | sgtc:20, | ||
63 | res1:2; | ||
64 | } bits; | ||
65 | unsigned long val; | ||
66 | }; | ||
67 | #endif | ||
68 | |||
69 | #ifdef CONFIG_CPU_FREQ_DEBUG | ||
70 | /* divide by 1000 to get VCore voltage in V. */ | ||
71 | static const int mobile_vid_table[32] = { | ||
72 | 2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650, | ||
73 | 1600, 1550, 1500, 1450, 1400, 1350, 1300, 0, | ||
74 | 1275, 1250, 1225, 1200, 1175, 1150, 1125, 1100, | ||
75 | 1075, 1050, 1025, 1000, 975, 950, 925, 0, | ||
76 | }; | ||
77 | #endif | ||
78 | |||
79 | /* divide by 10 to get FID. */ | ||
80 | static const int fid_codes[32] = { | ||
81 | 110, 115, 120, 125, 50, 55, 60, 65, | ||
82 | 70, 75, 80, 85, 90, 95, 100, 105, | ||
83 | 30, 190, 40, 200, 130, 135, 140, 210, | ||
84 | 150, 225, 160, 165, 170, 180, -1, -1, | ||
85 | }; | ||
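Illustrative only (helper name hypothetical): fid_codes[] stores the multiplier times ten, and the driver keeps fsb in kHz, so a FID translates to a core clock exactly as get_ranges() computes further down.

/* Example: fid_codes[fid] == 110 (11.0x) with fsb == 100000 kHz (100 MHz)
 * gives 100000 * 110 / 10 == 1100000 kHz, i.e. 1.1 GHz. */
static unsigned int k7_khz_from_fid(unsigned int fid, unsigned int fsb_khz)
{
	return fsb_khz * fid_codes[fid] / 10;
}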
86 | |||
87 | /* This parameter forces use of the ACPI method instead of the legacy | ||
88 | * PSB/PST method for configuration. | ||
89 | */ | ||
90 | |||
91 | static int acpi_force; | ||
92 | |||
93 | static struct cpufreq_frequency_table *powernow_table; | ||
94 | |||
95 | static unsigned int can_scale_bus; | ||
96 | static unsigned int can_scale_vid; | ||
97 | static unsigned int minimum_speed=-1; | ||
98 | static unsigned int maximum_speed; | ||
99 | static unsigned int number_scales; | ||
100 | static unsigned int fsb; | ||
101 | static unsigned int latency; | ||
102 | static char have_a0; | ||
103 | |||
104 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "powernow-k7", msg) | ||
105 | |||
106 | static int check_fsb(unsigned int fsbspeed) | ||
107 | { | ||
108 | int delta; | ||
109 | unsigned int f = fsb / 1000; | ||
110 | |||
111 | delta = (fsbspeed > f) ? fsbspeed - f : f - fsbspeed; | ||
112 | return (delta < 5); | ||
113 | } | ||
114 | |||
115 | static int check_powernow(void) | ||
116 | { | ||
117 | struct cpuinfo_x86 *c = cpu_data; | ||
118 | unsigned int maxei, eax, ebx, ecx, edx; | ||
119 | |||
120 | if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 !=6)) { | ||
121 | #ifdef MODULE | ||
122 | printk (KERN_INFO PFX "This module only works with AMD K7 CPUs\n"); | ||
123 | #endif | ||
124 | return 0; | ||
125 | } | ||
126 | |||
127 | /* Get maximum capabilities */ | ||
128 | maxei = cpuid_eax (0x80000000); | ||
129 | if (maxei < 0x80000007) { /* Any powernow info ? */ | ||
130 | #ifdef MODULE | ||
131 | printk (KERN_INFO PFX "No powernow capabilities detected\n"); | ||
132 | #endif | ||
133 | return 0; | ||
134 | } | ||
135 | |||
136 | if ((c->x86_model == 6) && (c->x86_mask == 0)) { | ||
137 | printk (KERN_INFO PFX "K7 660[A0] core detected, enabling errata workarounds\n"); | ||
138 | have_a0 = 1; | ||
139 | } | ||
140 | |||
141 | cpuid(0x80000007, &eax, &ebx, &ecx, &edx); | ||
142 | |||
143 | /* Check we can actually do something before we say anything.*/ | ||
144 | if (!(edx & (1 << 1 | 1 << 2))) | ||
145 | return 0; | ||
146 | |||
147 | printk (KERN_INFO PFX "PowerNOW! Technology present. Can scale: "); | ||
148 | |||
149 | if (edx & 1 << 1) { | ||
150 | printk ("frequency"); | ||
151 | can_scale_bus=1; | ||
152 | } | ||
153 | |||
154 | if ((edx & (1 << 1 | 1 << 2)) == 0x6) | ||
155 | printk (" and "); | ||
156 | |||
157 | if (edx & 1 << 2) { | ||
158 | printk ("voltage"); | ||
159 | can_scale_vid=1; | ||
160 | } | ||
161 | |||
162 | printk (".\n"); | ||
163 | return 1; | ||
164 | } | ||
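A minimal sketch of the capability test above, assuming only what the code itself shows: bit 1 of EDX from CPUID leaf 0x80000007 advertises FID (frequency) control and bit 2 VID (voltage) control; the helper name is hypothetical.

/* Hypothetical illustration: split the CPUID 0x80000007 EDX bits the same
 * way check_powernow() does. */
static void k7_decode_caps(unsigned int edx,
			   unsigned int *scale_bus, unsigned int *scale_vid)
{
	*scale_bus = !!(edx & (1 << 1));	/* frequency (FID) scaling */
	*scale_vid = !!(edx & (1 << 2));	/* voltage (VID) scaling */
}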
165 | |||
166 | |||
167 | static int get_ranges (unsigned char *pst) | ||
168 | { | ||
169 | unsigned int j; | ||
170 | unsigned int speed; | ||
171 | u8 fid, vid; | ||
172 | |||
173 | powernow_table = kzalloc((sizeof(struct cpufreq_frequency_table) * (number_scales + 1)), GFP_KERNEL); | ||
174 | if (!powernow_table) | ||
175 | return -ENOMEM; | ||
176 | |||
177 | for (j=0 ; j < number_scales; j++) { | ||
178 | fid = *pst++; | ||
179 | |||
180 | powernow_table[j].frequency = (fsb * fid_codes[fid]) / 10; | ||
181 | powernow_table[j].index = fid; /* lower 8 bits */ | ||
182 | |||
183 | speed = powernow_table[j].frequency; | ||
184 | |||
185 | if ((fid_codes[fid] % 10)==5) { | ||
186 | #ifdef CONFIG_X86_POWERNOW_K7_ACPI | ||
187 | if (have_a0 == 1) | ||
188 | powernow_table[j].frequency = CPUFREQ_ENTRY_INVALID; | ||
189 | #endif | ||
190 | } | ||
191 | |||
192 | if (speed < minimum_speed) | ||
193 | minimum_speed = speed; | ||
194 | if (speed > maximum_speed) | ||
195 | maximum_speed = speed; | ||
196 | |||
197 | vid = *pst++; | ||
198 | powernow_table[j].index |= (vid << 8); /* upper 8 bits */ | ||
199 | |||
200 | dprintk (" FID: 0x%x (%d.%dx [%dMHz]) " | ||
201 | "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10, | ||
202 | fid_codes[fid] % 10, speed/1000, vid, | ||
203 | mobile_vid_table[vid]/1000, | ||
204 | mobile_vid_table[vid]%1000); | ||
205 | } | ||
206 | powernow_table[number_scales].frequency = CPUFREQ_TABLE_END; | ||
207 | powernow_table[number_scales].index = 0; | ||
208 | |||
209 | return 0; | ||
210 | } | ||
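get_ranges() packs the FID into the low byte and the VID into the next byte of each table entry's .index, and change_speed() unpacks them again; a brief sketch of that arithmetic (helper names hypothetical):

/* FID in bits 7:0, VID in bits 15:8 of the cpufreq table .index field. */
static inline unsigned int k7_pack_index(unsigned char fid, unsigned char vid)
{
	return fid | (vid << 8);
}

static inline void k7_unpack_index(unsigned int index,
				   unsigned char *fid, unsigned char *vid)
{
	*fid = index & 0xff;
	*vid = (index >> 8) & 0xff;
}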
211 | |||
212 | |||
213 | static void change_FID(int fid) | ||
214 | { | ||
215 | union msr_fidvidctl fidvidctl; | ||
216 | |||
217 | rdmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val); | ||
218 | if (fidvidctl.bits.FID != fid) { | ||
219 | fidvidctl.bits.SGTC = latency; | ||
220 | fidvidctl.bits.FID = fid; | ||
221 | fidvidctl.bits.VIDC = 0; | ||
222 | fidvidctl.bits.FIDC = 1; | ||
223 | wrmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val); | ||
224 | } | ||
225 | } | ||
226 | |||
227 | |||
228 | static void change_VID(int vid) | ||
229 | { | ||
230 | union msr_fidvidctl fidvidctl; | ||
231 | |||
232 | rdmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val); | ||
233 | if (fidvidctl.bits.VID != vid) { | ||
234 | fidvidctl.bits.SGTC = latency; | ||
235 | fidvidctl.bits.VID = vid; | ||
236 | fidvidctl.bits.FIDC = 0; | ||
237 | fidvidctl.bits.VIDC = 1; | ||
238 | wrmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val); | ||
239 | } | ||
240 | } | ||
241 | |||
242 | |||
243 | static void change_speed (unsigned int index) | ||
244 | { | ||
245 | u8 fid, vid; | ||
246 | struct cpufreq_freqs freqs; | ||
247 | union msr_fidvidstatus fidvidstatus; | ||
248 | int cfid; | ||
249 | |||
250 | /* fid are the lower 8 bits of the index we stored into | ||
251 | * the cpufreq frequency table in powernow_decode_bios, | ||
252 | * vid are the upper 8 bits. | ||
253 | */ | ||
254 | |||
255 | fid = powernow_table[index].index & 0xFF; | ||
256 | vid = (powernow_table[index].index & 0xFF00) >> 8; | ||
257 | |||
258 | freqs.cpu = 0; | ||
259 | |||
260 | rdmsrl (MSR_K7_FID_VID_STATUS, fidvidstatus.val); | ||
261 | cfid = fidvidstatus.bits.CFID; | ||
262 | freqs.old = fsb * fid_codes[cfid] / 10; | ||
263 | |||
264 | freqs.new = powernow_table[index].frequency; | ||
265 | |||
266 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
267 | |||
268 | /* Now do the magic poking into the MSRs. */ | ||
269 | |||
270 | if (have_a0 == 1) /* A0 errata 5 */ | ||
271 | local_irq_disable(); | ||
272 | |||
273 | if (freqs.old > freqs.new) { | ||
274 | /* Going down, so change FID first */ | ||
275 | change_FID(fid); | ||
276 | change_VID(vid); | ||
277 | } else { | ||
278 | /* Going up, so change VID first */ | ||
279 | change_VID(vid); | ||
280 | change_FID(fid); | ||
281 | } | ||
282 | |||
283 | |||
284 | if (have_a0 == 1) | ||
285 | local_irq_enable(); | ||
286 | |||
287 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
288 | } | ||
289 | |||
290 | |||
291 | #ifdef CONFIG_X86_POWERNOW_K7_ACPI | ||
292 | |||
293 | static struct acpi_processor_performance *acpi_processor_perf; | ||
294 | |||
295 | static int powernow_acpi_init(void) | ||
296 | { | ||
297 | int i; | ||
298 | int retval = 0; | ||
299 | union powernow_acpi_control_t pc; | ||
300 | |||
301 | if (acpi_processor_perf != NULL && powernow_table != NULL) { | ||
302 | retval = -EINVAL; | ||
303 | goto err0; | ||
304 | } | ||
305 | |||
306 | acpi_processor_perf = kzalloc(sizeof(struct acpi_processor_performance), | ||
307 | GFP_KERNEL); | ||
308 | if (!acpi_processor_perf) { | ||
309 | retval = -ENOMEM; | ||
310 | goto err0; | ||
311 | } | ||
312 | |||
313 | if (acpi_processor_register_performance(acpi_processor_perf, 0)) { | ||
314 | retval = -EIO; | ||
315 | goto err1; | ||
316 | } | ||
317 | |||
318 | if (acpi_processor_perf->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) { | ||
319 | retval = -ENODEV; | ||
320 | goto err2; | ||
321 | } | ||
322 | |||
323 | if (acpi_processor_perf->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) { | ||
324 | retval = -ENODEV; | ||
325 | goto err2; | ||
326 | } | ||
327 | |||
328 | number_scales = acpi_processor_perf->state_count; | ||
329 | |||
330 | if (number_scales < 2) { | ||
331 | retval = -ENODEV; | ||
332 | goto err2; | ||
333 | } | ||
334 | |||
335 | powernow_table = kzalloc((number_scales + 1) * (sizeof(struct cpufreq_frequency_table)), GFP_KERNEL); | ||
336 | if (!powernow_table) { | ||
337 | retval = -ENOMEM; | ||
338 | goto err2; | ||
339 | } | ||
340 | |||
341 | pc.val = (unsigned long) acpi_processor_perf->states[0].control; | ||
342 | for (i = 0; i < number_scales; i++) { | ||
343 | u8 fid, vid; | ||
344 | struct acpi_processor_px *state = | ||
345 | &acpi_processor_perf->states[i]; | ||
346 | unsigned int speed, speed_mhz; | ||
347 | |||
348 | pc.val = (unsigned long) state->control; | ||
349 | dprintk ("acpi: P%d: %d MHz %d mW %d uS control %08x SGTC %d\n", | ||
350 | i, | ||
351 | (u32) state->core_frequency, | ||
352 | (u32) state->power, | ||
353 | (u32) state->transition_latency, | ||
354 | (u32) state->control, | ||
355 | pc.bits.sgtc); | ||
356 | |||
357 | vid = pc.bits.vid; | ||
358 | fid = pc.bits.fid; | ||
359 | |||
360 | powernow_table[i].frequency = fsb * fid_codes[fid] / 10; | ||
361 | powernow_table[i].index = fid; /* lower 8 bits */ | ||
362 | powernow_table[i].index |= (vid << 8); /* upper 8 bits */ | ||
363 | |||
364 | speed = powernow_table[i].frequency; | ||
365 | speed_mhz = speed / 1000; | ||
366 | |||
367 | /* processor_perflib will multiply the MHz value by 1000 to | ||
368 | * get a KHz value (e.g. 1266000). However, powernow-k7 works | ||
369 | * with true KHz values (e.g. 1266768). To ensure that all | ||
370 | * powernow frequencies are available, we must ensure that | ||
371 | * ACPI doesn't restrict them, so we round up the MHz value | ||
372 | * to ensure that perflib's computed KHz value is greater than | ||
373 | * or equal to powernow's KHz value. | ||
374 | */ | ||
375 | if (speed % 1000 > 0) | ||
376 | speed_mhz++; | ||
377 | |||
378 | if ((fid_codes[fid] % 10)==5) { | ||
379 | if (have_a0 == 1) | ||
380 | powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; | ||
381 | } | ||
382 | |||
383 | dprintk (" FID: 0x%x (%d.%dx [%dMHz]) " | ||
384 | "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10, | ||
385 | fid_codes[fid] % 10, speed_mhz, vid, | ||
386 | mobile_vid_table[vid]/1000, | ||
387 | mobile_vid_table[vid]%1000); | ||
388 | |||
389 | if (state->core_frequency != speed_mhz) { | ||
390 | state->core_frequency = speed_mhz; | ||
391 | dprintk(" Corrected ACPI frequency to %d\n", | ||
392 | speed_mhz); | ||
393 | } | ||
394 | |||
395 | if (latency < pc.bits.sgtc) | ||
396 | latency = pc.bits.sgtc; | ||
397 | |||
398 | if (speed < minimum_speed) | ||
399 | minimum_speed = speed; | ||
400 | if (speed > maximum_speed) | ||
401 | maximum_speed = speed; | ||
402 | } | ||
403 | |||
404 | powernow_table[i].frequency = CPUFREQ_TABLE_END; | ||
405 | powernow_table[i].index = 0; | ||
406 | |||
407 | /* notify BIOS that we exist */ | ||
408 | acpi_processor_notify_smm(THIS_MODULE); | ||
409 | |||
410 | return 0; | ||
411 | |||
412 | err2: | ||
413 | acpi_processor_unregister_performance(acpi_processor_perf, 0); | ||
414 | err1: | ||
415 | kfree(acpi_processor_perf); | ||
416 | err0: | ||
417 | printk(KERN_WARNING PFX "ACPI perflib cannot be used on this platform\n"); | ||
418 | acpi_processor_perf = NULL; | ||
419 | return retval; | ||
420 | } | ||
421 | #else | ||
422 | static int powernow_acpi_init(void) | ||
423 | { | ||
424 | printk(KERN_INFO PFX "no support for ACPI processor found." | ||
425 | " Please recompile your kernel with ACPI processor\n"); | ||
426 | return -EINVAL; | ||
427 | } | ||
428 | #endif | ||
429 | |||
430 | static int powernow_decode_bios (int maxfid, int startvid) | ||
431 | { | ||
432 | struct psb_s *psb; | ||
433 | struct pst_s *pst; | ||
434 | unsigned int i, j; | ||
435 | unsigned char *p; | ||
436 | unsigned int etuple; | ||
437 | unsigned int ret; | ||
438 | |||
439 | etuple = cpuid_eax(0x80000001); | ||
440 | |||
441 | for (i=0xC0000; i < 0xffff0 ; i+=16) { | ||
442 | |||
443 | p = phys_to_virt(i); | ||
444 | |||
445 | if (memcmp(p, "AMDK7PNOW!", 10) == 0){ | ||
446 | dprintk ("Found PSB header at %p\n", p); | ||
447 | psb = (struct psb_s *) p; | ||
448 | dprintk ("Table version: 0x%x\n", psb->tableversion); | ||
449 | if (psb->tableversion != 0x12) { | ||
450 | printk (KERN_INFO PFX "Sorry, only v1.2 tables supported right now\n"); | ||
451 | return -ENODEV; | ||
452 | } | ||
453 | |||
454 | dprintk ("Flags: 0x%x\n", psb->flags); | ||
455 | if ((psb->flags & 1)==0) { | ||
456 | dprintk ("Mobile voltage regulator\n"); | ||
457 | } else { | ||
458 | dprintk ("Desktop voltage regulator\n"); | ||
459 | } | ||
460 | |||
461 | latency = psb->settlingtime; | ||
462 | if (latency < 100) { | ||
463 | printk (KERN_INFO PFX "BIOS set settling time to %d microseconds." | ||
464 | "Should be at least 100. Correcting.\n", latency); | ||
465 | latency = 100; | ||
466 | } | ||
467 | dprintk ("Settling Time: %d microseconds.\n", psb->settlingtime); | ||
468 | dprintk ("Has %d PST tables. (Only dumping ones relevant to this CPU).\n", psb->numpst); | ||
469 | |||
470 | p += sizeof (struct psb_s); | ||
471 | |||
472 | pst = (struct pst_s *) p; | ||
473 | |||
474 | for (j=0; j<psb->numpst; j++) { | ||
475 | pst = (struct pst_s *) p; | ||
476 | number_scales = pst->numpstates; | ||
477 | |||
478 | if ((etuple == pst->cpuid) && check_fsb(pst->fsbspeed) && | ||
479 | (maxfid==pst->maxfid) && (startvid==pst->startvid)) | ||
480 | { | ||
481 | dprintk ("PST:%d (@%p)\n", j, pst); | ||
482 | dprintk (" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n", | ||
483 | pst->cpuid, pst->fsbspeed, pst->maxfid, pst->startvid); | ||
484 | |||
485 | ret = get_ranges ((char *) pst + sizeof (struct pst_s)); | ||
486 | return ret; | ||
487 | } else { | ||
488 | unsigned int k; | ||
489 | p = (char *) pst + sizeof (struct pst_s); | ||
490 | for (k=0; k<number_scales; k++) | ||
491 | p+=2; | ||
492 | } | ||
493 | } | ||
494 | printk (KERN_INFO PFX "No PST tables match this cpuid (0x%x)\n", etuple); | ||
495 | printk (KERN_INFO PFX "This is indicative of a broken BIOS.\n"); | ||
496 | |||
497 | return -EINVAL; | ||
498 | } | ||
499 | p++; | ||
500 | } | ||
501 | |||
502 | return -ENODEV; | ||
503 | } | ||
504 | |||
505 | |||
506 | static int powernow_target (struct cpufreq_policy *policy, | ||
507 | unsigned int target_freq, | ||
508 | unsigned int relation) | ||
509 | { | ||
510 | unsigned int newstate; | ||
511 | |||
512 | if (cpufreq_frequency_table_target(policy, powernow_table, target_freq, relation, &newstate)) | ||
513 | return -EINVAL; | ||
514 | |||
515 | change_speed(newstate); | ||
516 | |||
517 | return 0; | ||
518 | } | ||
519 | |||
520 | |||
521 | static int powernow_verify (struct cpufreq_policy *policy) | ||
522 | { | ||
523 | return cpufreq_frequency_table_verify(policy, powernow_table); | ||
524 | } | ||
525 | |||
526 | /* | ||
527 | * We use the fact that the bus frequency is roughly | ||
528 | * a multiple of 100000/3 kHz, and compute SGTC according | ||
529 | * to this multiple. | ||
530 | * That way we more closely match how AMD intends this to work, | ||
531 | * and get the same kind of behaviour already tested under | ||
532 | * the "well-known" other OS. | ||
533 | */ | ||
534 | static int __init fixup_sgtc(void) | ||
535 | { | ||
536 | unsigned int sgtc; | ||
537 | unsigned int m; | ||
538 | |||
539 | m = fsb / 3333; | ||
540 | if ((m % 10) >= 5) | ||
541 | m += 5; | ||
542 | |||
543 | m /= 10; | ||
544 | |||
545 | sgtc = 100 * m * latency; | ||
546 | sgtc = sgtc / 3; | ||
547 | if (sgtc > 0xfffff) { | ||
548 | printk(KERN_WARNING PFX "SGTC too large %d\n", sgtc); | ||
549 | sgtc = 0xfffff; | ||
550 | } | ||
551 | return sgtc; | ||
552 | } | ||
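A worked example of fixup_sgtc() above, assuming a 100 MHz FSB (fsb == 100000 kHz) and the minimum settling time of 100 microseconds:

	m    = 100000 / 3333 = 30;  (30 % 10) < 5, so m = 30 / 10 = 3
	sgtc = 100 * 3 * 100 / 3 = 10000 bus clocks

With a 133 MHz FSB (fsb == 133333), m works out to 4 and sgtc to 100 * 4 * 100 / 3 = 13333.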
553 | |||
554 | static unsigned int powernow_get(unsigned int cpu) | ||
555 | { | ||
556 | union msr_fidvidstatus fidvidstatus; | ||
557 | unsigned int cfid; | ||
558 | |||
559 | if (cpu) | ||
560 | return 0; | ||
561 | rdmsrl (MSR_K7_FID_VID_STATUS, fidvidstatus.val); | ||
562 | cfid = fidvidstatus.bits.CFID; | ||
563 | |||
564 | return (fsb * fid_codes[cfid] / 10); | ||
565 | } | ||
566 | |||
567 | |||
568 | static int __init acer_cpufreq_pst(struct dmi_system_id *d) | ||
569 | { | ||
570 | printk(KERN_WARNING "%s laptop with broken PST tables in BIOS detected.\n", d->ident); | ||
571 | printk(KERN_WARNING "You need to downgrade to 3A21 (09/09/2002), or try a newer BIOS than 3A71 (01/20/2003)\n"); | ||
572 | printk(KERN_WARNING "cpufreq scaling has been disabled as a result of this.\n"); | ||
573 | return 0; | ||
574 | } | ||
575 | |||
576 | /* | ||
577 | * Some Athlon laptops have really broken PST tables. | ||
578 | * A BIOS update is all that can save them. | ||
579 | * Mention this, and disable cpufreq. | ||
580 | */ | ||
581 | static struct dmi_system_id __initdata powernow_dmi_table[] = { | ||
582 | { | ||
583 | .callback = acer_cpufreq_pst, | ||
584 | .ident = "Acer Aspire", | ||
585 | .matches = { | ||
586 | DMI_MATCH(DMI_SYS_VENDOR, "Insyde Software"), | ||
587 | DMI_MATCH(DMI_BIOS_VERSION, "3A71"), | ||
588 | }, | ||
589 | }, | ||
590 | { } | ||
591 | }; | ||
592 | |||
593 | static int __init powernow_cpu_init (struct cpufreq_policy *policy) | ||
594 | { | ||
595 | union msr_fidvidstatus fidvidstatus; | ||
596 | int result; | ||
597 | |||
598 | if (policy->cpu != 0) | ||
599 | return -ENODEV; | ||
600 | |||
601 | rdmsrl (MSR_K7_FID_VID_STATUS, fidvidstatus.val); | ||
602 | |||
603 | recalibrate_cpu_khz(); | ||
604 | |||
605 | fsb = (10 * cpu_khz) / fid_codes[fidvidstatus.bits.CFID]; | ||
606 | if (!fsb) { | ||
607 | printk(KERN_WARNING PFX "can not determine bus frequency\n"); | ||
608 | return -EINVAL; | ||
609 | } | ||
610 | dprintk("FSB: %3dMHz\n", fsb/1000); | ||
611 | |||
612 | if (dmi_check_system(powernow_dmi_table) || acpi_force) { | ||
613 | printk (KERN_INFO PFX "PSB/PST known to be broken. Trying ACPI instead\n"); | ||
614 | result = powernow_acpi_init(); | ||
615 | } else { | ||
616 | result = powernow_decode_bios(fidvidstatus.bits.MFID, fidvidstatus.bits.SVID); | ||
617 | if (result) { | ||
618 | printk (KERN_INFO PFX "Trying ACPI perflib\n"); | ||
619 | maximum_speed = 0; | ||
620 | minimum_speed = -1; | ||
621 | latency = 0; | ||
622 | result = powernow_acpi_init(); | ||
623 | if (result) { | ||
624 | printk (KERN_INFO PFX "ACPI and legacy methods failed\n"); | ||
625 | printk (KERN_INFO PFX "See http://www.codemonkey.org.uk/projects/cpufreq/powernow-k7.html\n"); | ||
626 | } | ||
627 | } else { | ||
628 | /* SGTC use the bus clock as timer */ | ||
629 | latency = fixup_sgtc(); | ||
630 | printk(KERN_INFO PFX "SGTC: %d\n", latency); | ||
631 | } | ||
632 | } | ||
633 | |||
634 | if (result) | ||
635 | return result; | ||
636 | |||
637 | printk (KERN_INFO PFX "Minimum speed %d MHz. Maximum speed %d MHz.\n", | ||
638 | minimum_speed/1000, maximum_speed/1000); | ||
639 | |||
640 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
641 | |||
642 | policy->cpuinfo.transition_latency = cpufreq_scale(2000000UL, fsb, latency); | ||
643 | |||
644 | policy->cur = powernow_get(0); | ||
645 | |||
646 | cpufreq_frequency_table_get_attr(powernow_table, policy->cpu); | ||
647 | |||
648 | return cpufreq_frequency_table_cpuinfo(policy, powernow_table); | ||
649 | } | ||
650 | |||
651 | static int powernow_cpu_exit (struct cpufreq_policy *policy) { | ||
652 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
653 | |||
654 | #ifdef CONFIG_X86_POWERNOW_K7_ACPI | ||
655 | if (acpi_processor_perf) { | ||
656 | acpi_processor_unregister_performance(acpi_processor_perf, 0); | ||
657 | kfree(acpi_processor_perf); | ||
658 | } | ||
659 | #endif | ||
660 | |||
661 | kfree(powernow_table); | ||
662 | return 0; | ||
663 | } | ||
664 | |||
665 | static struct freq_attr* powernow_table_attr[] = { | ||
666 | &cpufreq_freq_attr_scaling_available_freqs, | ||
667 | NULL, | ||
668 | }; | ||
669 | |||
670 | static struct cpufreq_driver powernow_driver = { | ||
671 | .verify = powernow_verify, | ||
672 | .target = powernow_target, | ||
673 | .get = powernow_get, | ||
674 | .init = powernow_cpu_init, | ||
675 | .exit = powernow_cpu_exit, | ||
676 | .name = "powernow-k7", | ||
677 | .owner = THIS_MODULE, | ||
678 | .attr = powernow_table_attr, | ||
679 | }; | ||
680 | |||
681 | static int __init powernow_init (void) | ||
682 | { | ||
683 | if (check_powernow()==0) | ||
684 | return -ENODEV; | ||
685 | return cpufreq_register_driver(&powernow_driver); | ||
686 | } | ||
687 | |||
688 | |||
689 | static void __exit powernow_exit (void) | ||
690 | { | ||
691 | cpufreq_unregister_driver(&powernow_driver); | ||
692 | } | ||
693 | |||
694 | module_param(acpi_force, int, 0444); | ||
695 | MODULE_PARM_DESC(acpi_force, "Force ACPI to be used."); | ||
696 | |||
697 | MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>"); | ||
698 | MODULE_DESCRIPTION ("Powernow driver for AMD K7 processors."); | ||
699 | MODULE_LICENSE ("GPL"); | ||
700 | |||
701 | late_initcall(powernow_init); | ||
702 | module_exit(powernow_exit); | ||
703 | |||
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.h b/arch/x86/kernel/cpu/cpufreq/powernow-k7.h new file mode 100644 index 000000000000..f8a63b3664e3 --- /dev/null +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.h | |||
@@ -0,0 +1,44 @@ | |||
1 | /* | ||
2 | * $Id: powernow-k7.h,v 1.2 2003/02/10 18:26:01 davej Exp $ | ||
3 | * (C) 2003 Dave Jones. | ||
4 | * | ||
5 | * Licensed under the terms of the GNU GPL License version 2. | ||
6 | * | ||
7 | * AMD-specific information | ||
8 | * | ||
9 | */ | ||
10 | |||
11 | union msr_fidvidctl { | ||
12 | struct { | ||
13 | unsigned FID:5, // 4:0 | ||
14 | reserved1:3, // 7:5 | ||
15 | VID:5, // 12:8 | ||
16 | reserved2:3, // 15:13 | ||
17 | FIDC:1, // 16 | ||
18 | VIDC:1, // 17 | ||
19 | reserved3:2, // 19:18 | ||
20 | FIDCHGRATIO:1, // 20 | ||
21 | reserved4:11, // 31:21 | ||
22 | SGTC:20, // 51:32 | ||
23 | reserved5:12; // 63:52 | ||
24 | } bits; | ||
25 | unsigned long long val; | ||
26 | }; | ||
27 | |||
28 | union msr_fidvidstatus { | ||
29 | struct { | ||
30 | unsigned CFID:5, // 4:0 | ||
31 | reserved1:3, // 7:5 | ||
32 | SFID:5, // 12:8 | ||
33 | reserved2:3, // 15:13 | ||
34 | MFID:5, // 20:16 | ||
35 | reserved3:11, // 31:21 | ||
36 | CVID:5, // 36:32 | ||
37 | reserved4:3, // 39:37 | ||
38 | SVID:5, // 44:40 | ||
39 | reserved5:3, // 47:45 | ||
40 | MVID:5, // 52:48 | ||
41 | reserved6:11; // 63:53 | ||
42 | } bits; | ||
43 | unsigned long long val; | ||
44 | }; | ||
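For readers following the MSR layouts above, an equivalent shift-and-mask view of MSR_K7_FID_VID_STATUS (field positions taken from the comments above; macro names hypothetical):

/* Equivalent to the msr_fidvidstatus bitfields, for a raw 64-bit MSR value v. */
#define K7_STATUS_CFID(v)	((unsigned int)(((v) >>  0) & 0x1f))
#define K7_STATUS_SFID(v)	((unsigned int)(((v) >>  8) & 0x1f))
#define K7_STATUS_MFID(v)	((unsigned int)(((v) >> 16) & 0x1f))
#define K7_STATUS_CVID(v)	((unsigned int)(((v) >> 32) & 0x1f))
#define K7_STATUS_SVID(v)	((unsigned int)(((v) >> 40) & 0x1f))
#define K7_STATUS_MVID(v)	((unsigned int)(((v) >> 48) & 0x1f))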
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c new file mode 100644 index 000000000000..34ed53a06730 --- /dev/null +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c | |||
@@ -0,0 +1,1363 @@ | |||
1 | /* | ||
2 | * (c) 2003-2006 Advanced Micro Devices, Inc. | ||
3 | * Your use of this code is subject to the terms and conditions of the | ||
4 | * GNU general public license version 2. See "COPYING" or | ||
5 | * http://www.gnu.org/licenses/gpl.html | ||
6 | * | ||
7 | * Support : mark.langsdorf@amd.com | ||
8 | * | ||
9 | * Based on the powernow-k7.c module written by Dave Jones. | ||
10 | * (C) 2003 Dave Jones <davej@codemonkey.org.uk> on behalf of SuSE Labs | ||
11 | * (C) 2004 Dominik Brodowski <linux@brodo.de> | ||
12 | * (C) 2004 Pavel Machek <pavel@suse.cz> | ||
13 | * Licensed under the terms of the GNU GPL License version 2. | ||
14 | * Based upon datasheets & sample CPUs kindly provided by AMD. | ||
15 | * | ||
16 | * Valuable input gratefully received from Dave Jones, Pavel Machek, | ||
17 | * Dominik Brodowski, Jacob Shin, and others. | ||
18 | * Originally developed by Paul Devriendt. | ||
19 | * Processor information obtained from Chapter 9 (Power and Thermal Management) | ||
20 | * of the "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD | ||
21 | * Opteron Processors" available for download from www.amd.com | ||
22 | * | ||
23 | * Tables for specific CPUs can be inferred from | ||
24 | * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/30430.pdf | ||
25 | */ | ||
26 | |||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/smp.h> | ||
29 | #include <linux/module.h> | ||
30 | #include <linux/init.h> | ||
31 | #include <linux/cpufreq.h> | ||
32 | #include <linux/slab.h> | ||
33 | #include <linux/string.h> | ||
34 | #include <linux/cpumask.h> | ||
35 | #include <linux/sched.h> /* for current / set_cpus_allowed() */ | ||
36 | |||
37 | #include <asm/msr.h> | ||
38 | #include <asm/io.h> | ||
39 | #include <asm/delay.h> | ||
40 | |||
41 | #ifdef CONFIG_X86_POWERNOW_K8_ACPI | ||
42 | #include <linux/acpi.h> | ||
43 | #include <linux/mutex.h> | ||
44 | #include <acpi/processor.h> | ||
45 | #endif | ||
46 | |||
47 | #define PFX "powernow-k8: " | ||
48 | #define BFX PFX "BIOS error: " | ||
49 | #define VERSION "version 2.00.00" | ||
50 | #include "powernow-k8.h" | ||
51 | |||
52 | /* serialize freq changes */ | ||
53 | static DEFINE_MUTEX(fidvid_mutex); | ||
54 | |||
55 | static struct powernow_k8_data *powernow_data[NR_CPUS]; | ||
56 | |||
57 | static int cpu_family = CPU_OPTERON; | ||
58 | |||
59 | #ifndef CONFIG_SMP | ||
60 | static cpumask_t cpu_core_map[1]; | ||
61 | #endif | ||
62 | |||
63 | /* Return a frequency in MHz, given an input fid */ | ||
64 | static u32 find_freq_from_fid(u32 fid) | ||
65 | { | ||
66 | return 800 + (fid * 100); | ||
67 | } | ||
68 | |||
69 | |||
70 | /* Return a frequency in KHz, given an input fid */ | ||
71 | static u32 find_khz_freq_from_fid(u32 fid) | ||
72 | { | ||
73 | return 1000 * find_freq_from_fid(fid); | ||
74 | } | ||
75 | |||
76 | /* Return a frequency in MHz, given an input fid and did */ | ||
77 | static u32 find_freq_from_fiddid(u32 fid, u32 did) | ||
78 | { | ||
79 | return 100 * (fid + 0x10) >> did; | ||
80 | } | ||
81 | |||
82 | static u32 find_khz_freq_from_fiddid(u32 fid, u32 did) | ||
83 | { | ||
84 | return 1000 * find_freq_from_fiddid(fid, did); | ||
85 | } | ||
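Worked numbers for the two frequency formulas above (values chosen only for illustration):

	K8 fid/vid:   freq_MHz = 800 + fid * 100               e.g. fid 10 -> 1800 MHz
	HW P-state:   freq_MHz = (100 * (fid + 0x10)) >> did    e.g. fid 0x2, did 0 -> 1800 MHz, did 1 -> 900 MHz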
86 | |||
87 | static u32 find_fid_from_pstate(u32 pstate) | ||
88 | { | ||
89 | u32 hi, lo; | ||
90 | rdmsr(MSR_PSTATE_DEF_BASE + pstate, lo, hi); | ||
91 | return lo & HW_PSTATE_FID_MASK; | ||
92 | } | ||
93 | |||
94 | static u32 find_did_from_pstate(u32 pstate) | ||
95 | { | ||
96 | u32 hi, lo; | ||
97 | rdmsr(MSR_PSTATE_DEF_BASE + pstate, lo, hi); | ||
98 | return (lo & HW_PSTATE_DID_MASK) >> HW_PSTATE_DID_SHIFT; | ||
99 | } | ||
100 | |||
101 | /* Return the vco fid for an input fid | ||
102 | * | ||
103 | * Each "low" fid has corresponding "high" fid, and you can get to "low" fids | ||
104 | * only from corresponding high fids. This returns "high" fid corresponding to | ||
105 | * "low" one. | ||
106 | */ | ||
107 | static u32 convert_fid_to_vco_fid(u32 fid) | ||
108 | { | ||
109 | if (fid < HI_FID_TABLE_BOTTOM) | ||
110 | return 8 + (2 * fid); | ||
111 | else | ||
112 | return fid; | ||
113 | } | ||
114 | |||
115 | /* | ||
116 | * Return 1 if the pending bit is set. Unless we just instructed the processor | ||
117 | * to transition to a new state, seeing this bit set is really bad news. | ||
118 | */ | ||
119 | static int pending_bit_stuck(void) | ||
120 | { | ||
121 | u32 lo, hi; | ||
122 | |||
123 | if (cpu_family == CPU_HW_PSTATE) | ||
124 | return 0; | ||
125 | |||
126 | rdmsr(MSR_FIDVID_STATUS, lo, hi); | ||
127 | return lo & MSR_S_LO_CHANGE_PENDING ? 1 : 0; | ||
128 | } | ||
129 | |||
130 | /* | ||
131 | * Update the global current fid / vid values from the status msr. | ||
132 | * Returns 1 on error. | ||
133 | */ | ||
134 | static int query_current_values_with_pending_wait(struct powernow_k8_data *data) | ||
135 | { | ||
136 | u32 lo, hi; | ||
137 | u32 i = 0; | ||
138 | |||
139 | if (cpu_family == CPU_HW_PSTATE) { | ||
140 | rdmsr(MSR_PSTATE_STATUS, lo, hi); | ||
141 | i = lo & HW_PSTATE_MASK; | ||
142 | rdmsr(MSR_PSTATE_DEF_BASE + i, lo, hi); | ||
143 | data->currfid = lo & HW_PSTATE_FID_MASK; | ||
144 | data->currdid = (lo & HW_PSTATE_DID_MASK) >> HW_PSTATE_DID_SHIFT; | ||
145 | return 0; | ||
146 | } | ||
147 | do { | ||
148 | if (i++ > 10000) { | ||
149 | dprintk("detected change pending stuck\n"); | ||
150 | return 1; | ||
151 | } | ||
152 | rdmsr(MSR_FIDVID_STATUS, lo, hi); | ||
153 | } while (lo & MSR_S_LO_CHANGE_PENDING); | ||
154 | |||
155 | data->currvid = hi & MSR_S_HI_CURRENT_VID; | ||
156 | data->currfid = lo & MSR_S_LO_CURRENT_FID; | ||
157 | |||
158 | return 0; | ||
159 | } | ||
160 | |||
161 | /* the isochronous relief time */ | ||
162 | static void count_off_irt(struct powernow_k8_data *data) | ||
163 | { | ||
164 | udelay((1 << data->irt) * 10); | ||
165 | return; | ||
166 | } | ||
167 | |||
168 | /* the voltage stabilization time */ | ||
169 | static void count_off_vst(struct powernow_k8_data *data) | ||
170 | { | ||
171 | udelay(data->vstable * VST_UNITS_20US); | ||
172 | return; | ||
173 | } | ||
174 | |||
175 | /* need to init the control msr to a safe value (for each cpu) */ | ||
176 | static void fidvid_msr_init(void) | ||
177 | { | ||
178 | u32 lo, hi; | ||
179 | u8 fid, vid; | ||
180 | |||
181 | rdmsr(MSR_FIDVID_STATUS, lo, hi); | ||
182 | vid = hi & MSR_S_HI_CURRENT_VID; | ||
183 | fid = lo & MSR_S_LO_CURRENT_FID; | ||
184 | lo = fid | (vid << MSR_C_LO_VID_SHIFT); | ||
185 | hi = MSR_C_HI_STP_GNT_BENIGN; | ||
186 | dprintk("cpu%d, init lo 0x%x, hi 0x%x\n", smp_processor_id(), lo, hi); | ||
187 | wrmsr(MSR_FIDVID_CTL, lo, hi); | ||
188 | } | ||
189 | |||
190 | |||
191 | /* write the new fid value along with the other control fields to the msr */ | ||
192 | static int write_new_fid(struct powernow_k8_data *data, u32 fid) | ||
193 | { | ||
194 | u32 lo; | ||
195 | u32 savevid = data->currvid; | ||
196 | u32 i = 0; | ||
197 | |||
198 | if ((fid & INVALID_FID_MASK) || (data->currvid & INVALID_VID_MASK)) { | ||
199 | printk(KERN_ERR PFX "internal error - overflow on fid write\n"); | ||
200 | return 1; | ||
201 | } | ||
202 | |||
203 | lo = fid | (data->currvid << MSR_C_LO_VID_SHIFT) | MSR_C_LO_INIT_FID_VID; | ||
204 | |||
205 | dprintk("writing fid 0x%x, lo 0x%x, hi 0x%x\n", | ||
206 | fid, lo, data->plllock * PLL_LOCK_CONVERSION); | ||
207 | |||
208 | do { | ||
209 | wrmsr(MSR_FIDVID_CTL, lo, data->plllock * PLL_LOCK_CONVERSION); | ||
210 | if (i++ > 100) { | ||
211 | printk(KERN_ERR PFX "Hardware error - pending bit very stuck - no further pstate changes possible\n"); | ||
212 | return 1; | ||
213 | } | ||
214 | } while (query_current_values_with_pending_wait(data)); | ||
215 | |||
216 | count_off_irt(data); | ||
217 | |||
218 | if (savevid != data->currvid) { | ||
219 | printk(KERN_ERR PFX "vid change on fid trans, old 0x%x, new 0x%x\n", | ||
220 | savevid, data->currvid); | ||
221 | return 1; | ||
222 | } | ||
223 | |||
224 | if (fid != data->currfid) { | ||
225 | printk(KERN_ERR PFX "fid trans failed, fid 0x%x, curr 0x%x\n", fid, | ||
226 | data->currfid); | ||
227 | return 1; | ||
228 | } | ||
229 | |||
230 | return 0; | ||
231 | } | ||
232 | |||
233 | /* Write a new vid to the hardware */ | ||
234 | static int write_new_vid(struct powernow_k8_data *data, u32 vid) | ||
235 | { | ||
236 | u32 lo; | ||
237 | u32 savefid = data->currfid; | ||
238 | int i = 0; | ||
239 | |||
240 | if ((data->currfid & INVALID_FID_MASK) || (vid & INVALID_VID_MASK)) { | ||
241 | printk(KERN_ERR PFX "internal error - overflow on vid write\n"); | ||
242 | return 1; | ||
243 | } | ||
244 | |||
245 | lo = data->currfid | (vid << MSR_C_LO_VID_SHIFT) | MSR_C_LO_INIT_FID_VID; | ||
246 | |||
247 | dprintk("writing vid 0x%x, lo 0x%x, hi 0x%x\n", | ||
248 | vid, lo, STOP_GRANT_5NS); | ||
249 | |||
250 | do { | ||
251 | wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS); | ||
252 | if (i++ > 100) { | ||
253 | printk(KERN_ERR PFX "internal error - pending bit very stuck - no further pstate changes possible\n"); | ||
254 | return 1; | ||
255 | } | ||
256 | } while (query_current_values_with_pending_wait(data)); | ||
257 | |||
258 | if (savefid != data->currfid) { | ||
259 | printk(KERN_ERR PFX "fid changed on vid trans, old 0x%x new 0x%x\n", | ||
260 | savefid, data->currfid); | ||
261 | return 1; | ||
262 | } | ||
263 | |||
264 | if (vid != data->currvid) { | ||
265 | printk(KERN_ERR PFX "vid trans failed, vid 0x%x, curr 0x%x\n", vid, | ||
266 | data->currvid); | ||
267 | return 1; | ||
268 | } | ||
269 | |||
270 | return 0; | ||
271 | } | ||
272 | |||
273 | /* | ||
274 | * Reduce the vid by the max of step or reqvid. | ||
275 | * Decreasing vid codes represent increasing voltages: | ||
276 | * vid of 0 is 1.550V, vid of 0x1e is 0.800V, vid of VID_OFF is off. | ||
277 | */ | ||
278 | static int decrease_vid_code_by_step(struct powernow_k8_data *data, u32 reqvid, u32 step) | ||
279 | { | ||
280 | if ((data->currvid - reqvid) > step) | ||
281 | reqvid = data->currvid - step; | ||
282 | |||
283 | if (write_new_vid(data, reqvid)) | ||
284 | return 1; | ||
285 | |||
286 | count_off_vst(data); | ||
287 | |||
288 | return 0; | ||
289 | } | ||
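The VID-to-voltage relation implied by the comment above (vid 0 is 1.550 V, vid 0x1e is 0.800 V) works out to 25 mV per code; a small sketch assuming that linear mapping (illustration only, not part of the driver):

/* Assumption inferred from the comment above: K8 VID codes step down 25 mV
 * per code from 1.550 V at vid 0 to 0.800 V at vid 0x1e. */
static unsigned int k8_vid_to_mv(unsigned int vid)
{
	return 1550 - vid * 25;
}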
290 | |||
291 | /* Change hardware pstate by single MSR write */ | ||
292 | static int transition_pstate(struct powernow_k8_data *data, u32 pstate) | ||
293 | { | ||
294 | wrmsr(MSR_PSTATE_CTRL, pstate, 0); | ||
295 | data->currfid = find_fid_from_pstate(pstate); | ||
296 | return 0; | ||
297 | } | ||
298 | |||
299 | /* Change Opteron/Athlon64 fid and vid, by the 3 phases. */ | ||
300 | static int transition_fid_vid(struct powernow_k8_data *data, u32 reqfid, u32 reqvid) | ||
301 | { | ||
302 | if (core_voltage_pre_transition(data, reqvid)) | ||
303 | return 1; | ||
304 | |||
305 | if (core_frequency_transition(data, reqfid)) | ||
306 | return 1; | ||
307 | |||
308 | if (core_voltage_post_transition(data, reqvid)) | ||
309 | return 1; | ||
310 | |||
311 | if (query_current_values_with_pending_wait(data)) | ||
312 | return 1; | ||
313 | |||
314 | if ((reqfid != data->currfid) || (reqvid != data->currvid)) { | ||
315 | printk(KERN_ERR PFX "failed (cpu%d): req 0x%x 0x%x, curr 0x%x 0x%x\n", | ||
316 | smp_processor_id(), | ||
317 | reqfid, reqvid, data->currfid, data->currvid); | ||
318 | return 1; | ||
319 | } | ||
320 | |||
321 | dprintk("transitioned (cpu%d): new fid 0x%x, vid 0x%x\n", | ||
322 | smp_processor_id(), data->currfid, data->currvid); | ||
323 | |||
324 | return 0; | ||
325 | } | ||
326 | |||
327 | /* Phase 1 - core voltage transition ... setup voltage */ | ||
328 | static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid) | ||
329 | { | ||
330 | u32 rvosteps = data->rvo; | ||
331 | u32 savefid = data->currfid; | ||
332 | u32 maxvid, lo; | ||
333 | |||
334 | dprintk("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, reqvid 0x%x, rvo 0x%x\n", | ||
335 | smp_processor_id(), | ||
336 | data->currfid, data->currvid, reqvid, data->rvo); | ||
337 | |||
338 | rdmsr(MSR_FIDVID_STATUS, lo, maxvid); | ||
339 | maxvid = 0x1f & (maxvid >> 16); | ||
340 | dprintk("ph1 maxvid=0x%x\n", maxvid); | ||
341 | if (reqvid < maxvid) /* lower numbers are higher voltages */ | ||
342 | reqvid = maxvid; | ||
343 | |||
344 | while (data->currvid > reqvid) { | ||
345 | dprintk("ph1: curr 0x%x, req vid 0x%x\n", | ||
346 | data->currvid, reqvid); | ||
347 | if (decrease_vid_code_by_step(data, reqvid, data->vidmvs)) | ||
348 | return 1; | ||
349 | } | ||
350 | |||
351 | while ((rvosteps > 0) && ((data->rvo + data->currvid) > reqvid)) { | ||
352 | if (data->currvid == maxvid) { | ||
353 | rvosteps = 0; | ||
354 | } else { | ||
355 | dprintk("ph1: changing vid for rvo, req 0x%x\n", | ||
356 | data->currvid - 1); | ||
357 | if (decrease_vid_code_by_step(data, data->currvid - 1, 1)) | ||
358 | return 1; | ||
359 | rvosteps--; | ||
360 | } | ||
361 | } | ||
362 | |||
363 | if (query_current_values_with_pending_wait(data)) | ||
364 | return 1; | ||
365 | |||
366 | if (savefid != data->currfid) { | ||
367 | printk(KERN_ERR PFX "ph1 err, currfid changed 0x%x\n", data->currfid); | ||
368 | return 1; | ||
369 | } | ||
370 | |||
371 | dprintk("ph1 complete, currfid 0x%x, currvid 0x%x\n", | ||
372 | data->currfid, data->currvid); | ||
373 | |||
374 | return 0; | ||
375 | } | ||
376 | |||
377 | /* Phase 2 - core frequency transition */ | ||
378 | static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid) | ||
379 | { | ||
380 | u32 vcoreqfid, vcocurrfid, vcofiddiff, fid_interval, savevid = data->currvid; | ||
381 | |||
382 | if ((reqfid < HI_FID_TABLE_BOTTOM) && (data->currfid < HI_FID_TABLE_BOTTOM)) { | ||
383 | printk(KERN_ERR PFX "ph2: illegal lo-lo transition 0x%x 0x%x\n", | ||
384 | reqfid, data->currfid); | ||
385 | return 1; | ||
386 | } | ||
387 | |||
388 | if (data->currfid == reqfid) { | ||
389 | printk(KERN_ERR PFX "ph2 null fid transition 0x%x\n", data->currfid); | ||
390 | return 0; | ||
391 | } | ||
392 | |||
393 | dprintk("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, reqfid 0x%x\n", | ||
394 | smp_processor_id(), | ||
395 | data->currfid, data->currvid, reqfid); | ||
396 | |||
397 | vcoreqfid = convert_fid_to_vco_fid(reqfid); | ||
398 | vcocurrfid = convert_fid_to_vco_fid(data->currfid); | ||
399 | vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid | ||
400 | : vcoreqfid - vcocurrfid; | ||
401 | |||
402 | while (vcofiddiff > 2) { | ||
403 | (data->currfid & 1) ? (fid_interval = 1) : (fid_interval = 2); | ||
404 | |||
405 | if (reqfid > data->currfid) { | ||
406 | if (data->currfid > LO_FID_TABLE_TOP) { | ||
407 | if (write_new_fid(data, data->currfid + fid_interval)) { | ||
408 | return 1; | ||
409 | } | ||
410 | } else { | ||
411 | if (write_new_fid | ||
412 | (data, 2 + convert_fid_to_vco_fid(data->currfid))) { | ||
413 | return 1; | ||
414 | } | ||
415 | } | ||
416 | } else { | ||
417 | if (write_new_fid(data, data->currfid - fid_interval)) | ||
418 | return 1; | ||
419 | } | ||
420 | |||
421 | vcocurrfid = convert_fid_to_vco_fid(data->currfid); | ||
422 | vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid | ||
423 | : vcoreqfid - vcocurrfid; | ||
424 | } | ||
425 | |||
426 | if (write_new_fid(data, reqfid)) | ||
427 | return 1; | ||
428 | |||
429 | if (query_current_values_with_pending_wait(data)) | ||
430 | return 1; | ||
431 | |||
432 | if (data->currfid != reqfid) { | ||
433 | printk(KERN_ERR PFX | ||
434 | "ph2: mismatch, failed fid transition, curr 0x%x, req 0x%x\n", | ||
435 | data->currfid, reqfid); | ||
436 | return 1; | ||
437 | } | ||
438 | |||
439 | if (savevid != data->currvid) { | ||
440 | printk(KERN_ERR PFX "ph2: vid changed, save 0x%x, curr 0x%x\n", | ||
441 | savevid, data->currvid); | ||
442 | return 1; | ||
443 | } | ||
444 | |||
445 | dprintk("ph2 complete, currfid 0x%x, currvid 0x%x\n", | ||
446 | data->currfid, data->currvid); | ||
447 | |||
448 | return 0; | ||
449 | } | ||
450 | |||
451 | /* Phase 3 - core voltage transition flow ... jump to the final vid. */ | ||
452 | static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvid) | ||
453 | { | ||
454 | u32 savefid = data->currfid; | ||
455 | u32 savereqvid = reqvid; | ||
456 | |||
457 | dprintk("ph3 (cpu%d): starting, currfid 0x%x, currvid 0x%x\n", | ||
458 | smp_processor_id(), | ||
459 | data->currfid, data->currvid); | ||
460 | |||
461 | if (reqvid != data->currvid) { | ||
462 | if (write_new_vid(data, reqvid)) | ||
463 | return 1; | ||
464 | |||
465 | if (savefid != data->currfid) { | ||
466 | printk(KERN_ERR PFX | ||
467 | "ph3: bad fid change, save 0x%x, curr 0x%x\n", | ||
468 | savefid, data->currfid); | ||
469 | return 1; | ||
470 | } | ||
471 | |||
472 | if (data->currvid != reqvid) { | ||
473 | printk(KERN_ERR PFX | ||
474 | "ph3: failed vid transition\n, req 0x%x, curr 0x%x", | ||
475 | reqvid, data->currvid); | ||
476 | return 1; | ||
477 | } | ||
478 | } | ||
479 | |||
480 | if (query_current_values_with_pending_wait(data)) | ||
481 | return 1; | ||
482 | |||
483 | if (savereqvid != data->currvid) { | ||
484 | dprintk("ph3 failed, currvid 0x%x\n", data->currvid); | ||
485 | return 1; | ||
486 | } | ||
487 | |||
488 | if (savefid != data->currfid) { | ||
489 | dprintk("ph3 failed, currfid changed 0x%x\n", | ||
490 | data->currfid); | ||
491 | return 1; | ||
492 | } | ||
493 | |||
494 | dprintk("ph3 complete, currfid 0x%x, currvid 0x%x\n", | ||
495 | data->currfid, data->currvid); | ||
496 | |||
497 | return 0; | ||
498 | } | ||
499 | |||
500 | static int check_supported_cpu(unsigned int cpu) | ||
501 | { | ||
502 | cpumask_t oldmask = CPU_MASK_ALL; | ||
503 | u32 eax, ebx, ecx, edx; | ||
504 | unsigned int rc = 0; | ||
505 | |||
506 | oldmask = current->cpus_allowed; | ||
507 | set_cpus_allowed(current, cpumask_of_cpu(cpu)); | ||
508 | |||
509 | if (smp_processor_id() != cpu) { | ||
510 | printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu); | ||
511 | goto out; | ||
512 | } | ||
513 | |||
514 | if (current_cpu_data.x86_vendor != X86_VENDOR_AMD) | ||
515 | goto out; | ||
516 | |||
517 | eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE); | ||
518 | if (((eax & CPUID_XFAM) != CPUID_XFAM_K8) && | ||
519 | ((eax & CPUID_XFAM) < CPUID_XFAM_10H)) | ||
520 | goto out; | ||
521 | |||
522 | if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) { | ||
523 | if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) || | ||
524 | ((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) { | ||
525 | printk(KERN_INFO PFX "Processor cpuid %x not supported\n", eax); | ||
526 | goto out; | ||
527 | } | ||
528 | |||
529 | eax = cpuid_eax(CPUID_GET_MAX_CAPABILITIES); | ||
530 | if (eax < CPUID_FREQ_VOLT_CAPABILITIES) { | ||
531 | printk(KERN_INFO PFX | ||
532 | "No frequency change capabilities detected\n"); | ||
533 | goto out; | ||
534 | } | ||
535 | |||
536 | cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx); | ||
537 | if ((edx & P_STATE_TRANSITION_CAPABLE) != P_STATE_TRANSITION_CAPABLE) { | ||
538 | printk(KERN_INFO PFX "Power state transitions not supported\n"); | ||
539 | goto out; | ||
540 | } | ||
541 | } else { /* must be a HW Pstate capable processor */ | ||
542 | cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx); | ||
543 | if ((edx & USE_HW_PSTATE) == USE_HW_PSTATE) | ||
544 | cpu_family = CPU_HW_PSTATE; | ||
545 | else | ||
546 | goto out; | ||
547 | } | ||
548 | |||
549 | rc = 1; | ||
550 | |||
551 | out: | ||
552 | set_cpus_allowed(current, oldmask); | ||
553 | return rc; | ||
554 | } | ||
555 | |||
556 | static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, u8 maxvid) | ||
557 | { | ||
558 | unsigned int j; | ||
559 | u8 lastfid = 0xff; | ||
560 | |||
561 | for (j = 0; j < data->numps; j++) { | ||
562 | if (pst[j].vid > LEAST_VID) { | ||
563 | printk(KERN_ERR PFX "vid %d invalid : 0x%x\n", j, pst[j].vid); | ||
564 | return -EINVAL; | ||
565 | } | ||
566 | if (pst[j].vid < data->rvo) { /* vid + rvo >= 0 */ | ||
567 | printk(KERN_ERR BFX "0 vid exceeded with pstate %d\n", j); | ||
568 | return -ENODEV; | ||
569 | } | ||
570 | if (pst[j].vid < maxvid + data->rvo) { /* vid + rvo >= maxvid */ | ||
571 | printk(KERN_ERR BFX "maxvid exceeded with pstate %d\n", j); | ||
572 | return -ENODEV; | ||
573 | } | ||
574 | if (pst[j].fid > MAX_FID) { | ||
575 | printk(KERN_ERR BFX "maxfid exceeded with pstate %d\n", j); | ||
576 | return -ENODEV; | ||
577 | } | ||
578 | if (j && (pst[j].fid < HI_FID_TABLE_BOTTOM)) { | ||
579 | /* Only first fid is allowed to be in "low" range */ | ||
580 | printk(KERN_ERR BFX "two low fids - %d : 0x%x\n", j, pst[j].fid); | ||
581 | return -EINVAL; | ||
582 | } | ||
583 | if (pst[j].fid < lastfid) | ||
584 | lastfid = pst[j].fid; | ||
585 | } | ||
586 | if (lastfid & 1) { | ||
587 | printk(KERN_ERR BFX "lastfid invalid\n"); | ||
588 | return -EINVAL; | ||
589 | } | ||
590 | if (lastfid > LO_FID_TABLE_TOP) | ||
591 | printk(KERN_INFO BFX "first fid not from lo freq table\n"); | ||
592 | |||
593 | return 0; | ||
594 | } | ||
595 | |||
596 | static void print_basics(struct powernow_k8_data *data) | ||
597 | { | ||
598 | int j; | ||
599 | for (j = 0; j < data->numps; j++) { | ||
600 | if (data->powernow_table[j].frequency != CPUFREQ_ENTRY_INVALID) { | ||
601 | if (cpu_family == CPU_HW_PSTATE) { | ||
602 | printk(KERN_INFO PFX " %d : fid 0x%x did 0x%x (%d MHz)\n", | ||
603 | j, | ||
604 | (data->powernow_table[j].index & 0xff00) >> 8, | ||
605 | (data->powernow_table[j].index & 0xff0000) >> 16, | ||
606 | data->powernow_table[j].frequency/1000); | ||
607 | } else { | ||
608 | printk(KERN_INFO PFX " %d : fid 0x%x (%d MHz), vid 0x%x\n", | ||
609 | j, | ||
610 | data->powernow_table[j].index & 0xff, | ||
611 | data->powernow_table[j].frequency/1000, | ||
612 | data->powernow_table[j].index >> 8); | ||
613 | } | ||
614 | } | ||
615 | } | ||
616 | if (data->batps) | ||
617 | printk(KERN_INFO PFX "Only %d pstates on battery\n", data->batps); | ||
618 | } | ||
619 | |||
620 | static int fill_powernow_table(struct powernow_k8_data *data, struct pst_s *pst, u8 maxvid) | ||
621 | { | ||
622 | struct cpufreq_frequency_table *powernow_table; | ||
623 | unsigned int j; | ||
624 | |||
625 | if (data->batps) { /* use ACPI support to get full speed on mains power */ | ||
626 | printk(KERN_WARNING PFX "Only %d pstates usable (use ACPI driver for full range\n", data->batps); | ||
627 | data->numps = data->batps; | ||
628 | } | ||
629 | |||
630 | for ( j=1; j<data->numps; j++ ) { | ||
631 | if (pst[j-1].fid >= pst[j].fid) { | ||
632 | printk(KERN_ERR PFX "PST out of sequence\n"); | ||
633 | return -EINVAL; | ||
634 | } | ||
635 | } | ||
636 | |||
637 | if (data->numps < 2) { | ||
638 | printk(KERN_ERR PFX "no p states to transition\n"); | ||
639 | return -ENODEV; | ||
640 | } | ||
641 | |||
642 | if (check_pst_table(data, pst, maxvid)) | ||
643 | return -EINVAL; | ||
644 | |||
645 | powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) | ||
646 | * (data->numps + 1)), GFP_KERNEL); | ||
647 | if (!powernow_table) { | ||
648 | printk(KERN_ERR PFX "powernow_table memory alloc failure\n"); | ||
649 | return -ENOMEM; | ||
650 | } | ||
651 | |||
652 | for (j = 0; j < data->numps; j++) { | ||
653 | powernow_table[j].index = pst[j].fid; /* lower 8 bits */ | ||
654 | powernow_table[j].index |= (pst[j].vid << 8); /* upper 8 bits */ | ||
655 | powernow_table[j].frequency = find_khz_freq_from_fid(pst[j].fid); | ||
656 | } | ||
657 | powernow_table[data->numps].frequency = CPUFREQ_TABLE_END; | ||
658 | powernow_table[data->numps].index = 0; | ||
659 | |||
660 | if (query_current_values_with_pending_wait(data)) { | ||
661 | kfree(powernow_table); | ||
662 | return -EIO; | ||
663 | } | ||
664 | |||
665 | dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid); | ||
666 | data->powernow_table = powernow_table; | ||
667 | if (first_cpu(cpu_core_map[data->cpu]) == data->cpu) | ||
668 | print_basics(data); | ||
669 | |||
670 | for (j = 0; j < data->numps; j++) | ||
671 | if ((pst[j].fid==data->currfid) && (pst[j].vid==data->currvid)) | ||
672 | return 0; | ||
673 | |||
674 | dprintk("currfid/vid do not match PST, ignoring\n"); | ||
675 | return 0; | ||
676 | } | ||
677 | |||
678 | /* Find and validate the PSB/PST table in BIOS. */ | ||
679 | static int find_psb_table(struct powernow_k8_data *data) | ||
680 | { | ||
681 | struct psb_s *psb; | ||
682 | unsigned int i; | ||
683 | u32 mvs; | ||
684 | u8 maxvid; | ||
685 | u32 cpst = 0; | ||
686 | u32 thiscpuid; | ||
687 | |||
688 | for (i = 0xc0000; i < 0xffff0; i += 0x10) { | ||
689 | /* Scan BIOS looking for the signature. */ | ||
690 | /* It can not be at ffff0 - it is too big. */ | ||
691 | |||
692 | psb = phys_to_virt(i); | ||
693 | if (memcmp(psb, PSB_ID_STRING, PSB_ID_STRING_LEN) != 0) | ||
694 | continue; | ||
695 | |||
696 | dprintk("found PSB header at 0x%p\n", psb); | ||
697 | |||
698 | dprintk("table vers: 0x%x\n", psb->tableversion); | ||
699 | if (psb->tableversion != PSB_VERSION_1_4) { | ||
700 | printk(KERN_ERR BFX "PSB table is not v1.4\n"); | ||
701 | return -ENODEV; | ||
702 | } | ||
703 | |||
704 | dprintk("flags: 0x%x\n", psb->flags1); | ||
705 | if (psb->flags1) { | ||
706 | printk(KERN_ERR BFX "unknown flags\n"); | ||
707 | return -ENODEV; | ||
708 | } | ||
709 | |||
710 | data->vstable = psb->vstable; | ||
711 | dprintk("voltage stabilization time: %d(*20us)\n", data->vstable); | ||
712 | |||
713 | dprintk("flags2: 0x%x\n", psb->flags2); | ||
714 | data->rvo = psb->flags2 & 3; | ||
715 | data->irt = ((psb->flags2) >> 2) & 3; | ||
716 | mvs = ((psb->flags2) >> 4) & 3; | ||
717 | data->vidmvs = 1 << mvs; | ||
718 | data->batps = ((psb->flags2) >> 6) & 3; | ||
719 | |||
720 | dprintk("ramp voltage offset: %d\n", data->rvo); | ||
721 | dprintk("isochronous relief time: %d\n", data->irt); | ||
722 | dprintk("maximum voltage step: %d - 0x%x\n", mvs, data->vidmvs); | ||
723 | |||
724 | dprintk("numpst: 0x%x\n", psb->num_tables); | ||
725 | cpst = psb->num_tables; | ||
726 | if ((psb->cpuid == 0x00000fc0) || (psb->cpuid == 0x00000fe0) ){ | ||
727 | thiscpuid = cpuid_eax(CPUID_PROCESSOR_SIGNATURE); | ||
728 | if ((thiscpuid == 0x00000fc0) || (thiscpuid == 0x00000fe0) ) { | ||
729 | cpst = 1; | ||
730 | } | ||
731 | } | ||
732 | if (cpst != 1) { | ||
733 | printk(KERN_ERR BFX "numpst must be 1\n"); | ||
734 | return -ENODEV; | ||
735 | } | ||
736 | |||
737 | data->plllock = psb->plllocktime; | ||
738 | dprintk("plllocktime: 0x%x (units 1us)\n", psb->plllocktime); | ||
739 | dprintk("maxfid: 0x%x\n", psb->maxfid); | ||
740 | dprintk("maxvid: 0x%x\n", psb->maxvid); | ||
741 | maxvid = psb->maxvid; | ||
742 | |||
743 | data->numps = psb->numps; | ||
744 | dprintk("numpstates: 0x%x\n", data->numps); | ||
745 | return fill_powernow_table(data, (struct pst_s *)(psb+1), maxvid); | ||
746 | } | ||
747 | /* | ||
748 | * If you see this message, complain to BIOS manufacturer. If | ||
749 | * he tells you "we do not support Linux" or some similar | ||
750 | * nonsense, remember that Windows 2000 uses the same legacy | ||
751 | * mechanism that the old Linux PSB driver uses. Tell them it | ||
752 | * is broken with Windows 2000. | ||
753 | * | ||
754 | * The reference to the AMD documentation is chapter 9 in the | ||
755 | * BIOS and Kernel Developer's Guide, which is available on | ||
756 | * www.amd.com | ||
757 | */ | ||
758 | printk(KERN_ERR PFX "BIOS error - no PSB or ACPI _PSS objects\n"); | ||
759 | return -ENODEV; | ||
760 | } | ||
761 | |||
762 | #ifdef CONFIG_X86_POWERNOW_K8_ACPI | ||
763 | static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) | ||
764 | { | ||
765 | if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE)) | ||
766 | return; | ||
767 | |||
768 | data->irt = (data->acpi_data.states[index].control >> IRT_SHIFT) & IRT_MASK; | ||
769 | data->rvo = (data->acpi_data.states[index].control >> RVO_SHIFT) & RVO_MASK; | ||
770 | data->exttype = (data->acpi_data.states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK; | ||
771 | data->plllock = (data->acpi_data.states[index].control >> PLL_L_SHIFT) & PLL_L_MASK; | ||
772 | data->vidmvs = 1 << ((data->acpi_data.states[index].control >> MVS_SHIFT) & MVS_MASK); | ||
773 | data->vstable = (data->acpi_data.states[index].control >> VST_SHIFT) & VST_MASK; | ||
774 | } | ||
775 | |||
776 | static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) | ||
777 | { | ||
778 | struct cpufreq_frequency_table *powernow_table; | ||
779 | int ret_val; | ||
780 | |||
781 | if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { | ||
782 | dprintk("register performance failed: bad ACPI data\n"); | ||
783 | return -EIO; | ||
784 | } | ||
785 | |||
786 | /* verify the data contained in the ACPI structures */ | ||
787 | if (data->acpi_data.state_count <= 1) { | ||
788 | dprintk("No ACPI P-States\n"); | ||
789 | goto err_out; | ||
790 | } | ||
791 | |||
792 | if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) || | ||
793 | (data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) { | ||
794 | dprintk("Invalid control/status registers (%x - %x)\n", | ||
795 | data->acpi_data.control_register.space_id, | ||
796 | data->acpi_data.status_register.space_id); | ||
797 | goto err_out; | ||
798 | } | ||
799 | |||
800 | /* fill in data->powernow_table */ | ||
801 | powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) | ||
802 | * (data->acpi_data.state_count + 1)), GFP_KERNEL); | ||
803 | if (!powernow_table) { | ||
804 | dprintk("powernow_table memory alloc failure\n"); | ||
805 | goto err_out; | ||
806 | } | ||
807 | |||
808 | if (cpu_family == CPU_HW_PSTATE) | ||
809 | ret_val = fill_powernow_table_pstate(data, powernow_table); | ||
810 | else | ||
811 | ret_val = fill_powernow_table_fidvid(data, powernow_table); | ||
812 | if (ret_val) | ||
813 | goto err_out_mem; | ||
814 | |||
815 | powernow_table[data->acpi_data.state_count].frequency = CPUFREQ_TABLE_END; | ||
816 | powernow_table[data->acpi_data.state_count].index = 0; | ||
817 | data->powernow_table = powernow_table; | ||
818 | |||
819 | /* fill in data */ | ||
820 | data->numps = data->acpi_data.state_count; | ||
821 | if (first_cpu(cpu_core_map[data->cpu]) == data->cpu) | ||
822 | print_basics(data); | ||
823 | powernow_k8_acpi_pst_values(data, 0); | ||
824 | |||
825 | /* notify BIOS that we exist */ | ||
826 | acpi_processor_notify_smm(THIS_MODULE); | ||
827 | |||
828 | return 0; | ||
829 | |||
830 | err_out_mem: | ||
831 | kfree(powernow_table); | ||
832 | |||
833 | err_out: | ||
834 | acpi_processor_unregister_performance(&data->acpi_data, data->cpu); | ||
835 | |||
836 | /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */ | ||
837 | data->acpi_data.state_count = 0; | ||
838 | |||
839 | return -ENODEV; | ||
840 | } | ||
841 | |||
842 | static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table) | ||
843 | { | ||
844 | int i; | ||
845 | |||
846 | for (i = 0; i < data->acpi_data.state_count; i++) { | ||
847 | u32 index; | ||
848 | u32 hi = 0, lo = 0; | ||
849 | u32 fid; | ||
850 | u32 did; | ||
851 | |||
852 | index = data->acpi_data.states[i].control & HW_PSTATE_MASK; | ||
853 | if (index > MAX_HW_PSTATE) { | ||
854 | printk(KERN_ERR PFX "invalid pstate %d - bad value %d.\n", i, index); | ||
855 | printk(KERN_ERR PFX "Please report to BIOS manufacturer\n"); | ||
856 | } | ||
857 | rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi); | ||
858 | if (!(hi & HW_PSTATE_VALID_MASK)) { | ||
859 | dprintk("invalid pstate %d, ignoring\n", index); | ||
860 | powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; | ||
861 | continue; | ||
862 | } | ||
863 | |||
864 | fid = lo & HW_PSTATE_FID_MASK; | ||
865 | did = (lo & HW_PSTATE_DID_MASK) >> HW_PSTATE_DID_SHIFT; | ||
866 | |||
867 | dprintk(" %d : fid 0x%x, did 0x%x\n", index, fid, did); | ||
868 | |||
869 | powernow_table[i].index = index | (fid << HW_FID_INDEX_SHIFT) | (did << HW_DID_INDEX_SHIFT); | ||
870 | |||
871 | powernow_table[i].frequency = find_khz_freq_from_fiddid(fid, did); | ||
872 | |||
873 | if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) { | ||
874 | printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n", | ||
875 | powernow_table[i].frequency, | ||
876 | (unsigned int) (data->acpi_data.states[i].core_frequency * 1000)); | ||
877 | powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; | ||
878 | continue; | ||
879 | } | ||
880 | } | ||
881 | return 0; | ||
882 | } | ||
883 | |||
884 | static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table) | ||
885 | { | ||
886 | int i; | ||
887 | int cntlofreq = 0; | ||
888 | for (i = 0; i < data->acpi_data.state_count; i++) { | ||
889 | u32 fid; | ||
890 | u32 vid; | ||
891 | |||
892 | if (data->exttype) { | ||
893 | fid = data->acpi_data.states[i].status & EXT_FID_MASK; | ||
894 | vid = (data->acpi_data.states[i].status >> VID_SHIFT) & EXT_VID_MASK; | ||
895 | } else { | ||
896 | fid = data->acpi_data.states[i].control & FID_MASK; | ||
897 | vid = (data->acpi_data.states[i].control >> VID_SHIFT) & VID_MASK; | ||
898 | } | ||
899 | |||
900 | dprintk(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid); | ||
901 | |||
902 | powernow_table[i].index = fid; /* lower 8 bits */ | ||
903 | powernow_table[i].index |= (vid << 8); /* upper 8 bits */ | ||
904 | powernow_table[i].frequency = find_khz_freq_from_fid(fid); | ||
905 | |||
906 | /* verify frequency is OK */ | ||
907 | if ((powernow_table[i].frequency > (MAX_FREQ * 1000)) || | ||
908 | (powernow_table[i].frequency < (MIN_FREQ * 1000))) { | ||
909 | dprintk("invalid freq %u kHz, ignoring\n", powernow_table[i].frequency); | ||
910 | powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; | ||
911 | continue; | ||
912 | } | ||
913 | |||
914 | /* verify voltage is OK - BIOSs are using "off" to indicate invalid */ | ||
915 | if (vid == VID_OFF) { | ||
916 | dprintk("invalid vid %u, ignoring\n", vid); | ||
917 | powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; | ||
918 | continue; | ||
919 | } | ||
920 | |||
921 | /* verify only 1 entry from the lo frequency table */ | ||
922 | if (fid < HI_FID_TABLE_BOTTOM) { | ||
923 | if (cntlofreq) { | ||
924 | /* if both entries are the same, ignore this one ... */ | ||
925 | if ((powernow_table[i].frequency != powernow_table[cntlofreq].frequency) || | ||
926 | (powernow_table[i].index != powernow_table[cntlofreq].index)) { | ||
927 | printk(KERN_ERR PFX "Too many lo freq table entries\n"); | ||
928 | return 1; | ||
929 | } | ||
930 | |||
931 | dprintk("double low frequency table entry, ignoring it.\n"); | ||
932 | powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; | ||
933 | continue; | ||
934 | } else | ||
935 | cntlofreq = i; | ||
936 | } | ||
937 | |||
938 | if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) { | ||
939 | printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n", | ||
940 | powernow_table[i].frequency, | ||
941 | (unsigned int) (data->acpi_data.states[i].core_frequency * 1000)); | ||
942 | powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; | ||
943 | continue; | ||
944 | } | ||
945 | } | ||
946 | return 0; | ||
947 | } | ||
948 | |||
949 | static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) | ||
950 | { | ||
951 | if (data->acpi_data.state_count) | ||
952 | acpi_processor_unregister_performance(&data->acpi_data, data->cpu); | ||
953 | } | ||
954 | |||
955 | #else | ||
956 | static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; } | ||
957 | static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; } | ||
958 | static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; } | ||
959 | #endif /* CONFIG_X86_POWERNOW_K8_ACPI */ | ||
960 | |||
961 | /* Take a frequency, and issue the fid/vid transition command */ | ||
962 | static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned int index) | ||
963 | { | ||
964 | u32 fid = 0; | ||
965 | u32 vid = 0; | ||
966 | int res, i; | ||
967 | struct cpufreq_freqs freqs; | ||
968 | |||
969 | dprintk("cpu %d transition to index %u\n", smp_processor_id(), index); | ||
970 | |||
971 | /* fid/vid correctness check for k8 */ | ||
972 | /* fid are the lower 8 bits of the index we stored into | ||
973 | * the cpufreq frequency table in find_psb_table, vid | ||
974 | * are the upper 8 bits. | ||
975 | */ | ||
976 | fid = data->powernow_table[index].index & 0xFF; | ||
977 | vid = (data->powernow_table[index].index & 0xFF00) >> 8; | ||
978 | |||
979 | dprintk("table matched fid 0x%x, giving vid 0x%x\n", fid, vid); | ||
980 | |||
981 | if (query_current_values_with_pending_wait(data)) | ||
982 | return 1; | ||
983 | |||
984 | if ((data->currvid == vid) && (data->currfid == fid)) { | ||
985 | dprintk("target matches current values (fid 0x%x, vid 0x%x)\n", | ||
986 | fid, vid); | ||
987 | return 0; | ||
988 | } | ||
989 | |||
990 | if ((fid < HI_FID_TABLE_BOTTOM) && (data->currfid < HI_FID_TABLE_BOTTOM)) { | ||
991 | printk(KERN_ERR PFX | ||
992 | "ignoring illegal change in lo freq table-%x to 0x%x\n", | ||
993 | data->currfid, fid); | ||
994 | return 1; | ||
995 | } | ||
996 | |||
997 | dprintk("cpu %d, changing to fid 0x%x, vid 0x%x\n", | ||
998 | smp_processor_id(), fid, vid); | ||
999 | freqs.old = find_khz_freq_from_fid(data->currfid); | ||
1000 | freqs.new = find_khz_freq_from_fid(fid); | ||
1001 | |||
1002 | for_each_cpu_mask(i, *(data->available_cores)) { | ||
1003 | freqs.cpu = i; | ||
1004 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
1005 | } | ||
1006 | |||
1007 | res = transition_fid_vid(data, fid, vid); | ||
1008 | freqs.new = find_khz_freq_from_fid(data->currfid); | ||
1009 | |||
1010 | for_each_cpu_mask(i, *(data->available_cores)) { | ||
1011 | freqs.cpu = i; | ||
1012 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
1013 | } | ||
1014 | return res; | ||
1015 | } | ||
1016 | |||
1017 | /* Take a frequency, and issue the hardware pstate transition command */ | ||
1018 | static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned int index) | ||
1019 | { | ||
1020 | u32 fid = 0; | ||
1021 | u32 did = 0; | ||
1022 | u32 pstate = 0; | ||
1023 | int res, i; | ||
1024 | struct cpufreq_freqs freqs; | ||
1025 | |||
1026 | dprintk("cpu %d transition to index %u\n", smp_processor_id(), index); | ||
1027 | |||
1028 | /* get fid did for hardware pstate transition */ | ||
1029 | pstate = index & HW_PSTATE_MASK; | ||
1030 | if (pstate > MAX_HW_PSTATE) | ||
1031 | return 0; | ||
1032 | fid = (index & HW_FID_INDEX_MASK) >> HW_FID_INDEX_SHIFT; | ||
1033 | did = (index & HW_DID_INDEX_MASK) >> HW_DID_INDEX_SHIFT; | ||
1034 | freqs.old = find_khz_freq_from_fiddid(data->currfid, data->currdid); | ||
1035 | freqs.new = find_khz_freq_from_fiddid(fid, did); | ||
1036 | |||
1037 | for_each_cpu_mask(i, *(data->available_cores)) { | ||
1038 | freqs.cpu = i; | ||
1039 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
1040 | } | ||
1041 | |||
1042 | res = transition_pstate(data, pstate); | ||
1043 | data->currfid = find_fid_from_pstate(pstate); | ||
1044 | data->currdid = find_did_from_pstate(pstate); | ||
1045 | freqs.new = find_khz_freq_from_fiddid(data->currfid, data->currdid); | ||
1046 | |||
1047 | for_each_cpu_mask(i, *(data->available_cores)) { | ||
1048 | freqs.cpu = i; | ||
1049 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
1050 | } | ||
1051 | return res; | ||
1052 | } | ||
1053 | |||
1054 | /* Driver entry point to switch to the target frequency */ | ||
1055 | static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation) | ||
1056 | { | ||
1057 | cpumask_t oldmask = CPU_MASK_ALL; | ||
1058 | struct powernow_k8_data *data = powernow_data[pol->cpu]; | ||
1059 | u32 checkfid; | ||
1060 | u32 checkvid; | ||
1061 | unsigned int newstate; | ||
1062 | int ret = -EIO; | ||
1063 | |||
1064 | if (!data) | ||
1065 | return -EINVAL; | ||
1066 | |||
1067 | checkfid = data->currfid; | ||
1068 | checkvid = data->currvid; | ||
1069 | |||
1070 | /* only run on specific CPU from here on */ | ||
1071 | oldmask = current->cpus_allowed; | ||
1072 | set_cpus_allowed(current, cpumask_of_cpu(pol->cpu)); | ||
1073 | |||
1074 | if (smp_processor_id() != pol->cpu) { | ||
1075 | printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); | ||
1076 | goto err_out; | ||
1077 | } | ||
1078 | |||
1079 | if (pending_bit_stuck()) { | ||
1080 | printk(KERN_ERR PFX "failing targ, change pending bit set\n"); | ||
1081 | goto err_out; | ||
1082 | } | ||
1083 | |||
1084 | dprintk("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n", | ||
1085 | pol->cpu, targfreq, pol->min, pol->max, relation); | ||
1086 | |||
1087 | if (query_current_values_with_pending_wait(data)) | ||
1088 | goto err_out; | ||
1089 | |||
1090 | if (cpu_family == CPU_HW_PSTATE) | ||
1091 | dprintk("targ: curr fid 0x%x, did 0x%x\n", | ||
1092 | data->currfid, data->currdid); | ||
1093 | else { | ||
1094 | dprintk("targ: curr fid 0x%x, vid 0x%x\n", | ||
1095 | data->currfid, data->currvid); | ||
1096 | |||
1097 | if ((checkvid != data->currvid) || (checkfid != data->currfid)) { | ||
1098 | printk(KERN_INFO PFX | ||
1099 | "error - out of sync, fix 0x%x 0x%x, vid 0x%x 0x%x\n", | ||
1100 | checkfid, data->currfid, checkvid, data->currvid); | ||
1101 | } | ||
1102 | } | ||
1103 | |||
1104 | if (cpufreq_frequency_table_target(pol, data->powernow_table, targfreq, relation, &newstate)) | ||
1105 | goto err_out; | ||
1106 | |||
1107 | mutex_lock(&fidvid_mutex); | ||
1108 | |||
1109 | powernow_k8_acpi_pst_values(data, newstate); | ||
1110 | |||
1111 | if (cpu_family == CPU_HW_PSTATE) | ||
1112 | ret = transition_frequency_pstate(data, newstate); | ||
1113 | else | ||
1114 | ret = transition_frequency_fidvid(data, newstate); | ||
1115 | if (ret) { | ||
1116 | printk(KERN_ERR PFX "transition frequency failed\n"); | ||
1117 | ret = 1; | ||
1118 | mutex_unlock(&fidvid_mutex); | ||
1119 | goto err_out; | ||
1120 | } | ||
1121 | mutex_unlock(&fidvid_mutex); | ||
1122 | |||
1123 | if (cpu_family == CPU_HW_PSTATE) | ||
1124 | pol->cur = find_khz_freq_from_fiddid(data->currfid, data->currdid); | ||
1125 | else | ||
1126 | pol->cur = find_khz_freq_from_fid(data->currfid); | ||
1127 | ret = 0; | ||
1128 | |||
1129 | err_out: | ||
1130 | set_cpus_allowed(current, oldmask); | ||
1131 | return ret; | ||
1132 | } | ||
1133 | |||
1134 | /* Driver entry point to verify the policy and range of frequencies */ | ||
1135 | static int powernowk8_verify(struct cpufreq_policy *pol) | ||
1136 | { | ||
1137 | struct powernow_k8_data *data = powernow_data[pol->cpu]; | ||
1138 | |||
1139 | if (!data) | ||
1140 | return -EINVAL; | ||
1141 | |||
1142 | return cpufreq_frequency_table_verify(pol, data->powernow_table); | ||
1143 | } | ||
1144 | |||
1145 | /* per CPU init entry point to the driver */ | ||
1146 | static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) | ||
1147 | { | ||
1148 | struct powernow_k8_data *data; | ||
1149 | cpumask_t oldmask = CPU_MASK_ALL; | ||
1150 | int rc; | ||
1151 | |||
1152 | if (!cpu_online(pol->cpu)) | ||
1153 | return -ENODEV; | ||
1154 | |||
1155 | if (!check_supported_cpu(pol->cpu)) | ||
1156 | return -ENODEV; | ||
1157 | |||
1158 | data = kzalloc(sizeof(struct powernow_k8_data), GFP_KERNEL); | ||
1159 | if (!data) { | ||
1160 | printk(KERN_ERR PFX "unable to alloc powernow_k8_data"); | ||
1161 | return -ENOMEM; | ||
1162 | } | ||
1163 | |||
1164 | data->cpu = pol->cpu; | ||
1165 | |||
1166 | if (powernow_k8_cpu_init_acpi(data)) { | ||
1167 | /* | ||
1168 | * Use the PSB BIOS structure. This is only available on | ||
1169 | * a UP version, and is deprecated by AMD. | ||
1170 | */ | ||
1171 | if (num_online_cpus() != 1) { | ||
1172 | printk(KERN_ERR PFX "MP systems not supported by PSB BIOS structure\n"); | ||
1173 | kfree(data); | ||
1174 | return -ENODEV; | ||
1175 | } | ||
1176 | if (pol->cpu != 0) { | ||
1177 | printk(KERN_ERR PFX "No _PSS objects for CPU other than CPU0\n"); | ||
1178 | kfree(data); | ||
1179 | return -ENODEV; | ||
1180 | } | ||
1181 | rc = find_psb_table(data); | ||
1182 | if (rc) { | ||
1183 | kfree(data); | ||
1184 | return -ENODEV; | ||
1185 | } | ||
1186 | } | ||
1187 | |||
1188 | /* only run on specific CPU from here on */ | ||
1189 | oldmask = current->cpus_allowed; | ||
1190 | set_cpus_allowed(current, cpumask_of_cpu(pol->cpu)); | ||
1191 | |||
1192 | if (smp_processor_id() != pol->cpu) { | ||
1193 | printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); | ||
1194 | goto err_out; | ||
1195 | } | ||
1196 | |||
1197 | if (pending_bit_stuck()) { | ||
1198 | printk(KERN_ERR PFX "failing init, change pending bit set\n"); | ||
1199 | goto err_out; | ||
1200 | } | ||
1201 | |||
1202 | if (query_current_values_with_pending_wait(data)) | ||
1203 | goto err_out; | ||
1204 | |||
1205 | if (cpu_family == CPU_OPTERON) | ||
1206 | fidvid_msr_init(); | ||
1207 | |||
1208 | /* run on any CPU again */ | ||
1209 | set_cpus_allowed(current, oldmask); | ||
1210 | |||
1211 | pol->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
1212 | if (cpu_family == CPU_HW_PSTATE) | ||
1213 | pol->cpus = cpumask_of_cpu(pol->cpu); | ||
1214 | else | ||
1215 | pol->cpus = cpu_core_map[pol->cpu]; | ||
1216 | data->available_cores = &(pol->cpus); | ||
1217 | |||
1218 | /* Take a crude guess here. | ||
1219 | * That guess was in microseconds, so multiply by 1000. */ | ||
1220 | pol->cpuinfo.transition_latency = (((data->rvo + 8) * data->vstable * VST_UNITS_20US) | ||
1221 | + (3 * (1 << data->irt) * 10)) * 1000; | ||
1222 | |||
1223 | if (cpu_family == CPU_HW_PSTATE) | ||
1224 | pol->cur = find_khz_freq_from_fiddid(data->currfid, data->currdid); | ||
1225 | else | ||
1226 | pol->cur = find_khz_freq_from_fid(data->currfid); | ||
1227 | dprintk("policy current frequency %d kHz\n", pol->cur); | ||
1228 | |||
1229 | /* min/max the cpu is capable of */ | ||
1230 | if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) { | ||
1231 | printk(KERN_ERR PFX "invalid powernow_table\n"); | ||
1232 | powernow_k8_cpu_exit_acpi(data); | ||
1233 | kfree(data->powernow_table); | ||
1234 | kfree(data); | ||
1235 | return -EINVAL; | ||
1236 | } | ||
1237 | |||
1238 | cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu); | ||
1239 | |||
1240 | if (cpu_family == CPU_HW_PSTATE) | ||
1241 | dprintk("cpu_init done, current fid 0x%x, did 0x%x\n", | ||
1242 | data->currfid, data->currdid); | ||
1243 | else | ||
1244 | dprintk("cpu_init done, current fid 0x%x, vid 0x%x\n", | ||
1245 | data->currfid, data->currvid); | ||
1246 | |||
1247 | powernow_data[pol->cpu] = data; | ||
1248 | |||
1249 | return 0; | ||
1250 | |||
1251 | err_out: | ||
1252 | set_cpus_allowed(current, oldmask); | ||
1253 | powernow_k8_cpu_exit_acpi(data); | ||
1254 | |||
1255 | kfree(data); | ||
1256 | return -ENODEV; | ||
1257 | } | ||
1258 | |||
1259 | static int __devexit powernowk8_cpu_exit (struct cpufreq_policy *pol) | ||
1260 | { | ||
1261 | struct powernow_k8_data *data = powernow_data[pol->cpu]; | ||
1262 | |||
1263 | if (!data) | ||
1264 | return -EINVAL; | ||
1265 | |||
1266 | powernow_k8_cpu_exit_acpi(data); | ||
1267 | |||
1268 | cpufreq_frequency_table_put_attr(pol->cpu); | ||
1269 | |||
1270 | kfree(data->powernow_table); | ||
1271 | kfree(data); | ||
1272 | |||
1273 | return 0; | ||
1274 | } | ||
1275 | |||
1276 | static unsigned int powernowk8_get (unsigned int cpu) | ||
1277 | { | ||
1278 | struct powernow_k8_data *data; | ||
1279 | cpumask_t oldmask = current->cpus_allowed; | ||
1280 | unsigned int khz = 0; | ||
1281 | |||
1282 | data = powernow_data[first_cpu(cpu_core_map[cpu])]; | ||
1283 | |||
1284 | if (!data) | ||
1285 | return -EINVAL; | ||
1286 | |||
1287 | set_cpus_allowed(current, cpumask_of_cpu(cpu)); | ||
1288 | if (smp_processor_id() != cpu) { | ||
1289 | printk(KERN_ERR PFX "limiting to CPU %d failed in powernowk8_get\n", cpu); | ||
1290 | set_cpus_allowed(current, oldmask); | ||
1291 | return 0; | ||
1292 | } | ||
1293 | |||
1294 | if (query_current_values_with_pending_wait(data)) | ||
1295 | goto out; | ||
1296 | |||
1297 | if (cpu_family == CPU_HW_PSTATE) | ||
1298 | khz = find_khz_freq_from_fiddid(data->currfid, data->currdid); | ||
1299 | else | ||
1300 | khz = find_khz_freq_from_fid(data->currfid); | ||
1301 | |||
1302 | |||
1303 | out: | ||
1304 | set_cpus_allowed(current, oldmask); | ||
1305 | return khz; | ||
1306 | } | ||
1307 | |||
1308 | static struct freq_attr* powernow_k8_attr[] = { | ||
1309 | &cpufreq_freq_attr_scaling_available_freqs, | ||
1310 | NULL, | ||
1311 | }; | ||
1312 | |||
1313 | static struct cpufreq_driver cpufreq_amd64_driver = { | ||
1314 | .verify = powernowk8_verify, | ||
1315 | .target = powernowk8_target, | ||
1316 | .init = powernowk8_cpu_init, | ||
1317 | .exit = __devexit_p(powernowk8_cpu_exit), | ||
1318 | .get = powernowk8_get, | ||
1319 | .name = "powernow-k8", | ||
1320 | .owner = THIS_MODULE, | ||
1321 | .attr = powernow_k8_attr, | ||
1322 | }; | ||
1323 | |||
1324 | /* driver entry point for init */ | ||
1325 | static int __cpuinit powernowk8_init(void) | ||
1326 | { | ||
1327 | unsigned int i, supported_cpus = 0; | ||
1328 | unsigned int booted_cores = 1; | ||
1329 | |||
1330 | for_each_online_cpu(i) { | ||
1331 | if (check_supported_cpu(i)) | ||
1332 | supported_cpus++; | ||
1333 | } | ||
1334 | |||
1335 | #ifdef CONFIG_SMP | ||
1336 | booted_cores = cpu_data[0].booted_cores; | ||
1337 | #endif | ||
1338 | |||
1339 | if (supported_cpus == num_online_cpus()) { | ||
1340 | printk(KERN_INFO PFX "Found %d %s " | ||
1341 | "processors (%d cpu cores) (" VERSION ")\n", | ||
1342 | supported_cpus/booted_cores, | ||
1343 | boot_cpu_data.x86_model_id, supported_cpus); | ||
1344 | return cpufreq_register_driver(&cpufreq_amd64_driver); | ||
1345 | } | ||
1346 | |||
1347 | return -ENODEV; | ||
1348 | } | ||
1349 | |||
1350 | /* driver entry point for term */ | ||
1351 | static void __exit powernowk8_exit(void) | ||
1352 | { | ||
1353 | dprintk("exit\n"); | ||
1354 | |||
1355 | cpufreq_unregister_driver(&cpufreq_amd64_driver); | ||
1356 | } | ||
1357 | |||
1358 | MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and Mark Langsdorf <mark.langsdorf@amd.com>"); | ||
1359 | MODULE_DESCRIPTION("AMD Athlon 64 and Opteron processor frequency driver."); | ||
1360 | MODULE_LICENSE("GPL"); | ||
1361 | |||
1362 | late_initcall(powernowk8_init); | ||
1363 | module_exit(powernowk8_exit); | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h new file mode 100644 index 000000000000..b06c812208ca --- /dev/null +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h | |||
@@ -0,0 +1,232 @@ | |||
1 | /* | ||
2 | * (c) 2003-2006 Advanced Micro Devices, Inc. | ||
3 | * Your use of this code is subject to the terms and conditions of the | ||
4 | * GNU general public license version 2. See "COPYING" or | ||
5 | * http://www.gnu.org/licenses/gpl.html | ||
6 | */ | ||
7 | |||
8 | struct powernow_k8_data { | ||
9 | unsigned int cpu; | ||
10 | |||
11 | u32 numps; /* number of p-states */ | ||
12 | u32 batps; /* number of p-states supported on battery */ | ||
13 | |||
14 | /* these values are constant when the PSB is used to determine | ||
15 | * vid/fid pairings, but are modified during the ->target() call | ||
16 | * when ACPI is used */ | ||
17 | u32 rvo; /* ramp voltage offset */ | ||
18 | u32 irt; /* isochronous relief time */ | ||
19 | u32 vidmvs; /* usable value calculated from mvs */ | ||
20 | u32 vstable; /* voltage stabilization time, units 20 us */ | ||
21 | u32 plllock; /* pll lock time, units 1 us */ | ||
22 | u32 exttype; /* extended interface = 1 */ | ||
23 | |||
24 | /* keep track of the current fid / vid or did */ | ||
25 | u32 currvid, currfid, currdid; | ||
26 | |||
27 | /* the powernow_table includes all frequency and vid/fid pairings: | ||
28 | * fid are the lower 8 bits of the index, vid are the upper 8 bits. | ||
29 | * frequency is in kHz */ | ||
30 | struct cpufreq_frequency_table *powernow_table; | ||
31 | |||
32 | #ifdef CONFIG_X86_POWERNOW_K8_ACPI | ||
33 | /* the acpi table needs to be kept. it's only available if ACPI was | ||
34 | * used to determine valid frequency/vid/fid states */ | ||
35 | struct acpi_processor_performance acpi_data; | ||
36 | #endif | ||
37 | /* we need to keep track of associated cores, but let cpufreq | ||
38 | * handle hotplug events - so just point at cpufreq pol->cpus | ||
39 | * structure */ | ||
40 | cpumask_t *available_cores; | ||
41 | }; | ||
42 | |||
43 | |||
44 | /* processor's cpuid instruction support */ | ||
45 | #define CPUID_PROCESSOR_SIGNATURE 1 /* function 1 */ | ||
46 | #define CPUID_XFAM 0x0ff00000 /* extended family */ | ||
47 | #define CPUID_XFAM_K8 0 | ||
48 | #define CPUID_XMOD 0x000f0000 /* extended model */ | ||
49 | #define CPUID_XMOD_REV_MASK 0x00080000 | ||
50 | #define CPUID_XFAM_10H 0x00100000 /* family 0x10 */ | ||
51 | #define CPUID_USE_XFAM_XMOD 0x00000f00 | ||
52 | #define CPUID_GET_MAX_CAPABILITIES 0x80000000 | ||
53 | #define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007 | ||
54 | #define P_STATE_TRANSITION_CAPABLE 6 | ||
55 | |||
56 | /* Model Specific Registers for p-state transitions. MSRs are 64-bit. For */ | ||
57 | /* writes (wrmsr - opcode 0f 30), the register number is placed in ecx, and */ | ||
58 | /* the value to write is placed in edx:eax. For reads (rdmsr - opcode 0f 32), */ | ||
59 | /* the register number is placed in ecx, and the data is returned in edx:eax. */ | ||
60 | |||
61 | #define MSR_FIDVID_CTL 0xc0010041 | ||
62 | #define MSR_FIDVID_STATUS 0xc0010042 | ||
63 | |||
64 | /* Field definitions within the FID VID Low Control MSR : */ | ||
65 | #define MSR_C_LO_INIT_FID_VID 0x00010000 | ||
66 | #define MSR_C_LO_NEW_VID 0x00003f00 | ||
67 | #define MSR_C_LO_NEW_FID 0x0000003f | ||
68 | #define MSR_C_LO_VID_SHIFT 8 | ||
69 | |||
70 | /* Field definitions within the FID VID High Control MSR : */ | ||
71 | #define MSR_C_HI_STP_GNT_TO 0x000fffff | ||
72 | |||
73 | /* Field definitions within the FID VID Low Status MSR : */ | ||
74 | #define MSR_S_LO_CHANGE_PENDING 0x80000000 /* cleared when completed */ | ||
75 | #define MSR_S_LO_MAX_RAMP_VID 0x3f000000 | ||
76 | #define MSR_S_LO_MAX_FID 0x003f0000 | ||
77 | #define MSR_S_LO_START_FID 0x00003f00 | ||
78 | #define MSR_S_LO_CURRENT_FID 0x0000003f | ||
79 | |||
80 | /* Field definitions within the FID VID High Status MSR : */ | ||
81 | #define MSR_S_HI_MIN_WORKING_VID 0x3f000000 | ||
82 | #define MSR_S_HI_MAX_WORKING_VID 0x003f0000 | ||
83 | #define MSR_S_HI_START_VID 0x00003f00 | ||
84 | #define MSR_S_HI_CURRENT_VID 0x0000003f | ||
85 | #define MSR_C_HI_STP_GNT_BENIGN 0x00000001 | ||
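/*
 * A sketch of how these status fields fit together; the driver's
 * query_current_values_with_pending_wait() does the real work and keeps
 * re-reading while the change-pending bit is set:
 *
 *	u32 lo, hi;
 *
 *	rdmsr(MSR_FIDVID_STATUS, lo, hi);
 *	while (lo & MSR_S_LO_CHANGE_PENDING)
 *		rdmsr(MSR_FIDVID_STATUS, lo, hi);
 *	currfid = lo & MSR_S_LO_CURRENT_FID;
 *	currvid = hi & MSR_S_HI_CURRENT_VID;
 */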
86 | |||
87 | |||
88 | /* Hardware Pstate _PSS and MSR definitions */ | ||
89 | #define USE_HW_PSTATE 0x00000080 | ||
90 | #define HW_PSTATE_FID_MASK 0x0000003f | ||
91 | #define HW_PSTATE_DID_MASK 0x000001c0 | ||
92 | #define HW_PSTATE_DID_SHIFT 6 | ||
93 | #define HW_PSTATE_MASK 0x00000007 | ||
94 | #define HW_PSTATE_VALID_MASK 0x80000000 | ||
95 | #define HW_FID_INDEX_SHIFT 8 | ||
96 | #define HW_FID_INDEX_MASK 0x0000ff00 | ||
97 | #define HW_DID_INDEX_SHIFT 16 | ||
98 | #define HW_DID_INDEX_MASK 0x00ff0000 | ||
99 | #define HW_WATTS_MASK 0xff | ||
100 | #define HW_PWR_DVR_MASK 0x300 | ||
101 | #define HW_PWR_DVR_SHIFT 8 | ||
102 | #define HW_PWR_MAX_MULT 3 | ||
103 | #define MAX_HW_PSTATE 8 /* hw pstate supports up to 8 */ | ||
104 | #define MSR_PSTATE_DEF_BASE 0xc0010064 /* base of Pstate MSRs */ | ||
105 | #define MSR_PSTATE_STATUS 0xc0010063 /* Pstate Status MSR */ | ||
106 | #define MSR_PSTATE_CTRL 0xc0010062 /* Pstate control MSR */ | ||
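/*
 * A sketch of how the masks above are combined when reading a hardware
 * pstate definition, mirroring fill_powernow_table_pstate() in
 * powernow-k8.c (pstate being the index taken from the _PSS control
 * value):
 *
 *	u32 lo, hi, fid, did;
 *
 *	rdmsr(MSR_PSTATE_DEF_BASE + pstate, lo, hi);
 *	if (hi & HW_PSTATE_VALID_MASK) {
 *		fid = lo & HW_PSTATE_FID_MASK;
 *		did = (lo & HW_PSTATE_DID_MASK) >> HW_PSTATE_DID_SHIFT;
 *	}
 */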
107 | |||
108 | /* define the two driver architectures */ | ||
109 | #define CPU_OPTERON 0 | ||
110 | #define CPU_HW_PSTATE 1 | ||
111 | |||
112 | |||
113 | /* | ||
114 | * There are restrictions the frequencies have to follow: | ||
115 | * - only 1 entry in the low fid table ( <=1.4GHz ) | ||
116 | * - lowest entry in the high fid table must be >= 2 * the entry in the | ||
117 | * low fid table | ||
118 | * - lowest entry in the high fid table must be <= 200MHz + 2 * the entry | ||
119 | * in the low fid table | ||
120 | * - the parts can only step at <= 200 MHz intervals, odd fid values are | ||
121 | * supported in revision G and later revisions. | ||
122 | * - lowest frequency must be >= interprocessor hypertransport link speed | ||
123 | * (only applies to MP systems obviously) | ||
124 | */ | ||
125 | |||
126 | /* fids (frequency identifiers) are arranged in 2 tables - lo and hi */ | ||
127 | #define LO_FID_TABLE_TOP 7 /* fid values marking the boundary */ | ||
128 | #define HI_FID_TABLE_BOTTOM 8 /* between the low and high tables */ | ||
129 | |||
130 | #define LO_VCOFREQ_TABLE_TOP 1400 /* corresponding vco frequency values */ | ||
131 | #define HI_VCOFREQ_TABLE_BOTTOM 1600 | ||
132 | |||
133 | #define MIN_FREQ_RESOLUTION 200 /* fids jump by 2 matching freq jumps by 200 */ | ||
134 | |||
135 | #define MAX_FID 0x2a /* Spec only gives FID values as far as 5 GHz */ | ||
136 | #define LEAST_VID 0x3e /* Lowest (numerically highest) useful vid value */ | ||
137 | |||
138 | #define MIN_FREQ 800 /* Min and max freqs, per spec */ | ||
139 | #define MAX_FREQ 5000 | ||
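/*
 * These limits are consistent with a fid-to-frequency mapping of
 * 800 MHz + fid * 100 MHz: fid 0x00 corresponds to MIN_FREQ (800 MHz)
 * and fid 0x2a (MAX_FID, 42 decimal) to MAX_FREQ (800 + 4200 = 5000 MHz),
 * so a fid step of 2 matches the 200 MHz MIN_FREQ_RESOLUTION above.
 */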
140 | |||
141 | #define INVALID_FID_MASK 0xffffffc0 /* not a valid fid if these bits are set */ | ||
142 | #define INVALID_VID_MASK 0xffffffc0 /* not a valid vid if these bits are set */ | ||
143 | |||
144 | #define VID_OFF 0x3f | ||
145 | |||
146 | #define STOP_GRANT_5NS 1 /* min poss memory access latency for voltage change */ | ||
147 | |||
148 | #define PLL_LOCK_CONVERSION (1000/5) /* ms to ns, then divide by clock period */ | ||
149 | |||
150 | #define MAXIMUM_VID_STEPS 1 /* Current cpus only allow a single step of 25mV */ | ||
151 | #define VST_UNITS_20US 20 /* Voltage Stabilization Time is in units of 20us */ | ||
152 | |||
153 | /* | ||
154 | * Most values of interest are encoded in a single field of the _PSS | ||
155 | * entries: the "control" value. | ||
156 | */ | ||
157 | |||
158 | #define IRT_SHIFT 30 | ||
159 | #define RVO_SHIFT 28 | ||
160 | #define EXT_TYPE_SHIFT 27 | ||
161 | #define PLL_L_SHIFT 20 | ||
162 | #define MVS_SHIFT 18 | ||
163 | #define VST_SHIFT 11 | ||
164 | #define VID_SHIFT 6 | ||
165 | #define IRT_MASK 3 | ||
166 | #define RVO_MASK 3 | ||
167 | #define EXT_TYPE_MASK 1 | ||
168 | #define PLL_L_MASK 0x7f | ||
169 | #define MVS_MASK 3 | ||
170 | #define VST_MASK 0x7f | ||
171 | #define VID_MASK 0x1f | ||
172 | #define FID_MASK 0x1f | ||
173 | #define EXT_VID_MASK 0x3f | ||
174 | #define EXT_FID_MASK 0x3f | ||
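/*
 * A sketch of how a _PSS "control" dword is unpacked with these shifts
 * and masks, mirroring powernow_k8_acpi_pst_values() and
 * fill_powernow_table_fidvid() in powernow-k8.c (ctl being the raw
 * acpi_data.states[i].control value):
 *
 *	fid     = ctl & FID_MASK;
 *	vid     = (ctl >> VID_SHIFT) & VID_MASK;
 *	irt     = (ctl >> IRT_SHIFT) & IRT_MASK;
 *	vstable = (ctl >> VST_SHIFT) & VST_MASK;
 */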
175 | |||
176 | |||
177 | /* | ||
178 | * Version 1.4 of the PSB table. This table is constructed by the BIOS and is used | ||
179 | * to tell the OS's power management driver which VIDs and FIDs are | ||
180 | * supported by this particular processor. | ||
181 | * If the data in the PSB / PST is wrong, then this driver will program the | ||
182 | * wrong values into hardware, which is very likely to lead to a crash. | ||
183 | */ | ||
184 | |||
185 | #define PSB_ID_STRING "AMDK7PNOW!" | ||
186 | #define PSB_ID_STRING_LEN 10 | ||
187 | |||
188 | #define PSB_VERSION_1_4 0x14 | ||
189 | |||
190 | struct psb_s { | ||
191 | u8 signature[10]; | ||
192 | u8 tableversion; | ||
193 | u8 flags1; | ||
194 | u16 vstable; | ||
195 | u8 flags2; | ||
196 | u8 num_tables; | ||
197 | u32 cpuid; | ||
198 | u8 plllocktime; | ||
199 | u8 maxfid; | ||
200 | u8 maxvid; | ||
201 | u8 numps; | ||
202 | }; | ||
203 | |||
204 | /* Pairs of fid/vid values are appended to the version 1.4 PSB table. */ | ||
205 | struct pst_s { | ||
206 | u8 fid; | ||
207 | u8 vid; | ||
208 | }; | ||
209 | |||
210 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "powernow-k8", msg) | ||
211 | |||
212 | static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid); | ||
213 | static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvid); | ||
214 | static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid); | ||
215 | |||
216 | static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index); | ||
217 | |||
218 | #ifdef CONFIG_X86_POWERNOW_K8_ACPI | ||
219 | static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table); | ||
220 | static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table); | ||
221 | #endif | ||
222 | |||
223 | #ifdef CONFIG_SMP | ||
224 | static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[]) | ||
225 | { | ||
226 | } | ||
227 | #else | ||
228 | static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[]) | ||
229 | { | ||
230 | cpu_set(0, cpu_sharedcore_mask[0]); | ||
231 | } | ||
232 | #endif | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/sc520_freq.c b/arch/x86/kernel/cpu/cpufreq/sc520_freq.c new file mode 100644 index 000000000000..b8fb4b521c62 --- /dev/null +++ b/arch/x86/kernel/cpu/cpufreq/sc520_freq.c | |||
@@ -0,0 +1,191 @@ | |||
1 | /* | ||
2 | * sc520_freq.c: cpufreq driver for the AMD Elan sc520 | ||
3 | * | ||
4 | * Copyright (C) 2005 Sean Young <sean@mess.org> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * Based on elanfreq.c | ||
12 | * | ||
13 | * 2005-03-30: - initial revision | ||
14 | */ | ||
15 | |||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/init.h> | ||
19 | |||
20 | #include <linux/delay.h> | ||
21 | #include <linux/cpufreq.h> | ||
22 | |||
23 | #include <asm/msr.h> | ||
24 | #include <asm/timex.h> | ||
25 | #include <asm/io.h> | ||
26 | |||
27 | #define MMCR_BASE 0xfffef000 /* The default base address */ | ||
28 | #define OFFS_CPUCTL 0x2 /* CPU Control Register */ | ||
29 | |||
30 | static __u8 __iomem *cpuctl; | ||
31 | |||
32 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "sc520_freq", msg) | ||
33 | |||
34 | static struct cpufreq_frequency_table sc520_freq_table[] = { | ||
35 | {0x01, 100000}, | ||
36 | {0x02, 133000}, | ||
37 | {0, CPUFREQ_TABLE_END}, | ||
38 | }; | ||
39 | |||
40 | static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu) | ||
41 | { | ||
42 | u8 clockspeed_reg = *cpuctl; | ||
43 | |||
44 | switch (clockspeed_reg & 0x03) { | ||
45 | default: | ||
46 | printk(KERN_ERR "sc520_freq: error: cpuctl register has unexpected value %02x\n", clockspeed_reg); | ||
47 | case 0x01: | ||
48 | return 100000; | ||
49 | case 0x02: | ||
50 | return 133000; | ||
51 | } | ||
52 | } | ||
53 | |||
54 | static void sc520_freq_set_cpu_state (unsigned int state) | ||
55 | { | ||
56 | |||
57 | struct cpufreq_freqs freqs; | ||
58 | u8 clockspeed_reg; | ||
59 | |||
60 | freqs.old = sc520_freq_get_cpu_frequency(0); | ||
61 | freqs.new = sc520_freq_table[state].frequency; | ||
62 | freqs.cpu = 0; /* AMD Elan is UP */ | ||
63 | |||
64 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
65 | |||
66 | dprintk("attempting to set frequency to %i kHz\n", | ||
67 | sc520_freq_table[state].frequency); | ||
68 | |||
69 | local_irq_disable(); | ||
70 | |||
71 | clockspeed_reg = *cpuctl & ~0x03; | ||
72 | *cpuctl = clockspeed_reg | sc520_freq_table[state].index; | ||
73 | |||
74 | local_irq_enable(); | ||
75 | |||
76 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
77 | }; | ||
78 | |||
79 | static int sc520_freq_verify (struct cpufreq_policy *policy) | ||
80 | { | ||
81 | return cpufreq_frequency_table_verify(policy, &sc520_freq_table[0]); | ||
82 | } | ||
83 | |||
84 | static int sc520_freq_target (struct cpufreq_policy *policy, | ||
85 | unsigned int target_freq, | ||
86 | unsigned int relation) | ||
87 | { | ||
88 | unsigned int newstate = 0; | ||
89 | |||
90 | if (cpufreq_frequency_table_target(policy, sc520_freq_table, target_freq, relation, &newstate)) | ||
91 | return -EINVAL; | ||
92 | |||
93 | sc520_freq_set_cpu_state(newstate); | ||
94 | |||
95 | return 0; | ||
96 | } | ||
97 | |||
98 | |||
99 | /* | ||
100 | * Module init and exit code | ||
101 | */ | ||
102 | |||
103 | static int sc520_freq_cpu_init(struct cpufreq_policy *policy) | ||
104 | { | ||
105 | struct cpuinfo_x86 *c = cpu_data; | ||
106 | int result; | ||
107 | |||
108 | /* capability check */ | ||
109 | if (c->x86_vendor != X86_VENDOR_AMD || | ||
110 | c->x86 != 4 || c->x86_model != 9) | ||
111 | return -ENODEV; | ||
112 | |||
113 | /* cpuinfo and default policy values */ | ||
114 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
115 | policy->cpuinfo.transition_latency = 1000000; /* 1ms */ | ||
116 | policy->cur = sc520_freq_get_cpu_frequency(0); | ||
117 | |||
118 | result = cpufreq_frequency_table_cpuinfo(policy, sc520_freq_table); | ||
119 | if (result) | ||
120 | return (result); | ||
121 | |||
122 | cpufreq_frequency_table_get_attr(sc520_freq_table, policy->cpu); | ||
123 | |||
124 | return 0; | ||
125 | } | ||
126 | |||
127 | |||
128 | static int sc520_freq_cpu_exit(struct cpufreq_policy *policy) | ||
129 | { | ||
130 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
131 | return 0; | ||
132 | } | ||
133 | |||
134 | |||
135 | static struct freq_attr* sc520_freq_attr[] = { | ||
136 | &cpufreq_freq_attr_scaling_available_freqs, | ||
137 | NULL, | ||
138 | }; | ||
139 | |||
140 | |||
141 | static struct cpufreq_driver sc520_freq_driver = { | ||
142 | .get = sc520_freq_get_cpu_frequency, | ||
143 | .verify = sc520_freq_verify, | ||
144 | .target = sc520_freq_target, | ||
145 | .init = sc520_freq_cpu_init, | ||
146 | .exit = sc520_freq_cpu_exit, | ||
147 | .name = "sc520_freq", | ||
148 | .owner = THIS_MODULE, | ||
149 | .attr = sc520_freq_attr, | ||
150 | }; | ||
151 | |||
152 | |||
153 | static int __init sc520_freq_init(void) | ||
154 | { | ||
155 | struct cpuinfo_x86 *c = cpu_data; | ||
156 | int err; | ||
157 | |||
158 | /* Test if we have the right hardware */ | ||
159 | if(c->x86_vendor != X86_VENDOR_AMD || | ||
160 | c->x86 != 4 || c->x86_model != 9) { | ||
161 | dprintk("no Elan SC520 processor found!\n"); | ||
162 | return -ENODEV; | ||
163 | } | ||
164 | cpuctl = ioremap((unsigned long)(MMCR_BASE + OFFS_CPUCTL), 1); | ||
165 | if(!cpuctl) { | ||
166 | printk(KERN_ERR "sc520_freq: error: failed to remap memory\n"); | ||
167 | return -ENOMEM; | ||
168 | } | ||
169 | |||
170 | err = cpufreq_register_driver(&sc520_freq_driver); | ||
171 | if (err) | ||
172 | iounmap(cpuctl); | ||
173 | |||
174 | return err; | ||
175 | } | ||
176 | |||
177 | |||
178 | static void __exit sc520_freq_exit(void) | ||
179 | { | ||
180 | cpufreq_unregister_driver(&sc520_freq_driver); | ||
181 | iounmap(cpuctl); | ||
182 | } | ||
183 | |||
184 | |||
185 | MODULE_LICENSE("GPL"); | ||
186 | MODULE_AUTHOR("Sean Young <sean@mess.org>"); | ||
187 | MODULE_DESCRIPTION("cpufreq driver for AMD's Elan sc520 CPU"); | ||
188 | |||
189 | module_init(sc520_freq_init); | ||
190 | module_exit(sc520_freq_exit); | ||
191 | |||
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c new file mode 100644 index 000000000000..6c5dc2c85aeb --- /dev/null +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | |||
@@ -0,0 +1,634 @@ | |||
1 | /* | ||
2 | * cpufreq driver for Enhanced SpeedStep, as found in Intel's Pentium | ||
3 | * M (part of the Centrino chipset). | ||
4 | * | ||
5 | * Since the original Pentium M, most new Intel CPUs support Enhanced | ||
6 | * SpeedStep. | ||
7 | * | ||
8 | * Despite the "SpeedStep" in the name, this is almost entirely unlike | ||
9 | * traditional SpeedStep. | ||
10 | * | ||
11 | * Modelled on speedstep.c | ||
12 | * | ||
13 | * Copyright (C) 2003 Jeremy Fitzhardinge <jeremy@goop.org> | ||
14 | */ | ||
15 | |||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/cpufreq.h> | ||
20 | #include <linux/sched.h> /* current */ | ||
21 | #include <linux/delay.h> | ||
22 | #include <linux/compiler.h> | ||
23 | |||
24 | #include <asm/msr.h> | ||
25 | #include <asm/processor.h> | ||
26 | #include <asm/cpufeature.h> | ||
27 | |||
28 | #define PFX "speedstep-centrino: " | ||
29 | #define MAINTAINER "cpufreq@lists.linux.org.uk" | ||
30 | |||
31 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg) | ||
32 | |||
33 | #define INTEL_MSR_RANGE (0xffff) | ||
34 | |||
35 | struct cpu_id | ||
36 | { | ||
37 | __u8 x86; /* CPU family */ | ||
38 | __u8 x86_model; /* model */ | ||
39 | __u8 x86_mask; /* stepping */ | ||
40 | }; | ||
41 | |||
42 | enum { | ||
43 | CPU_BANIAS, | ||
44 | CPU_DOTHAN_A1, | ||
45 | CPU_DOTHAN_A2, | ||
46 | CPU_DOTHAN_B0, | ||
47 | CPU_MP4HT_D0, | ||
48 | CPU_MP4HT_E0, | ||
49 | }; | ||
50 | |||
51 | static const struct cpu_id cpu_ids[] = { | ||
52 | [CPU_BANIAS] = { 6, 9, 5 }, | ||
53 | [CPU_DOTHAN_A1] = { 6, 13, 1 }, | ||
54 | [CPU_DOTHAN_A2] = { 6, 13, 2 }, | ||
55 | [CPU_DOTHAN_B0] = { 6, 13, 6 }, | ||
56 | [CPU_MP4HT_D0] = {15, 3, 4 }, | ||
57 | [CPU_MP4HT_E0] = {15, 4, 1 }, | ||
58 | }; | ||
59 | #define N_IDS ARRAY_SIZE(cpu_ids) | ||
60 | |||
61 | struct cpu_model | ||
62 | { | ||
63 | const struct cpu_id *cpu_id; | ||
64 | const char *model_name; | ||
65 | unsigned max_freq; /* max clock in kHz */ | ||
66 | |||
67 | struct cpufreq_frequency_table *op_points; /* clock/voltage pairs */ | ||
68 | }; | ||
69 | static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, const struct cpu_id *x); | ||
70 | |||
71 | /* Operating points for current CPU */ | ||
72 | static struct cpu_model *centrino_model[NR_CPUS]; | ||
73 | static const struct cpu_id *centrino_cpu[NR_CPUS]; | ||
74 | |||
75 | static struct cpufreq_driver centrino_driver; | ||
76 | |||
77 | #ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE | ||
78 | |||
79 | /* Computes the correct form for IA32_PERF_CTL MSR for a particular | ||
80 | frequency/voltage operating point; frequency in MHz, volts in mV. | ||
81 | This is stored as "index" in the structure. */ | ||
82 | #define OP(mhz, mv) \ | ||
83 | { \ | ||
84 | .frequency = (mhz) * 1000, \ | ||
85 | .index = (((mhz)/100) << 8) | ((mv - 700) / 16) \ | ||
86 | } | ||
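/* For example, OP(1000, 1004) expands to a table entry with
   .frequency = 1000000 (kHz) and
   .index = ((1000/100) << 8) | ((1004 - 700) / 16) = (10 << 8) | 19 = 0x0a13,
   which is the raw value later written to IA32_PERF_CTL. */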
87 | |||
88 | /* | ||
89 | * These voltage tables were derived from the Intel Pentium M | ||
90 | * datasheet, document 25261202.pdf, Table 5. I have verified they | ||
91 | * are consistent with my IBM ThinkPad X31, which has a 1.3GHz Pentium | ||
92 | * M. | ||
93 | */ | ||
94 | |||
95 | /* Ultra Low Voltage Intel Pentium M processor 900MHz (Banias) */ | ||
96 | static struct cpufreq_frequency_table banias_900[] = | ||
97 | { | ||
98 | OP(600, 844), | ||
99 | OP(800, 988), | ||
100 | OP(900, 1004), | ||
101 | { .frequency = CPUFREQ_TABLE_END } | ||
102 | }; | ||
103 | |||
104 | /* Ultra Low Voltage Intel Pentium M processor 1000MHz (Banias) */ | ||
105 | static struct cpufreq_frequency_table banias_1000[] = | ||
106 | { | ||
107 | OP(600, 844), | ||
108 | OP(800, 972), | ||
109 | OP(900, 988), | ||
110 | OP(1000, 1004), | ||
111 | { .frequency = CPUFREQ_TABLE_END } | ||
112 | }; | ||
113 | |||
114 | /* Low Voltage Intel Pentium M processor 1.10GHz (Banias) */ | ||
115 | static struct cpufreq_frequency_table banias_1100[] = | ||
116 | { | ||
117 | OP( 600, 956), | ||
118 | OP( 800, 1020), | ||
119 | OP( 900, 1100), | ||
120 | OP(1000, 1164), | ||
121 | OP(1100, 1180), | ||
122 | { .frequency = CPUFREQ_TABLE_END } | ||
123 | }; | ||
124 | |||
125 | |||
126 | /* Low Voltage Intel Pentium M processor 1.20GHz (Banias) */ | ||
127 | static struct cpufreq_frequency_table banias_1200[] = | ||
128 | { | ||
129 | OP( 600, 956), | ||
130 | OP( 800, 1004), | ||
131 | OP( 900, 1020), | ||
132 | OP(1000, 1100), | ||
133 | OP(1100, 1164), | ||
134 | OP(1200, 1180), | ||
135 | { .frequency = CPUFREQ_TABLE_END } | ||
136 | }; | ||
137 | |||
138 | /* Intel Pentium M processor 1.30GHz (Banias) */ | ||
139 | static struct cpufreq_frequency_table banias_1300[] = | ||
140 | { | ||
141 | OP( 600, 956), | ||
142 | OP( 800, 1260), | ||
143 | OP(1000, 1292), | ||
144 | OP(1200, 1356), | ||
145 | OP(1300, 1388), | ||
146 | { .frequency = CPUFREQ_TABLE_END } | ||
147 | }; | ||
148 | |||
149 | /* Intel Pentium M processor 1.40GHz (Banias) */ | ||
150 | static struct cpufreq_frequency_table banias_1400[] = | ||
151 | { | ||
152 | OP( 600, 956), | ||
153 | OP( 800, 1180), | ||
154 | OP(1000, 1308), | ||
155 | OP(1200, 1436), | ||
156 | OP(1400, 1484), | ||
157 | { .frequency = CPUFREQ_TABLE_END } | ||
158 | }; | ||
159 | |||
160 | /* Intel Pentium M processor 1.50GHz (Banias) */ | ||
161 | static struct cpufreq_frequency_table banias_1500[] = | ||
162 | { | ||
163 | OP( 600, 956), | ||
164 | OP( 800, 1116), | ||
165 | OP(1000, 1228), | ||
166 | OP(1200, 1356), | ||
167 | OP(1400, 1452), | ||
168 | OP(1500, 1484), | ||
169 | { .frequency = CPUFREQ_TABLE_END } | ||
170 | }; | ||
171 | |||
172 | /* Intel Pentium M processor 1.60GHz (Banias) */ | ||
173 | static struct cpufreq_frequency_table banias_1600[] = | ||
174 | { | ||
175 | OP( 600, 956), | ||
176 | OP( 800, 1036), | ||
177 | OP(1000, 1164), | ||
178 | OP(1200, 1276), | ||
179 | OP(1400, 1420), | ||
180 | OP(1600, 1484), | ||
181 | { .frequency = CPUFREQ_TABLE_END } | ||
182 | }; | ||
183 | |||
184 | /* Intel Pentium M processor 1.70GHz (Banias) */ | ||
185 | static struct cpufreq_frequency_table banias_1700[] = | ||
186 | { | ||
187 | OP( 600, 956), | ||
188 | OP( 800, 1004), | ||
189 | OP(1000, 1116), | ||
190 | OP(1200, 1228), | ||
191 | OP(1400, 1308), | ||
192 | OP(1700, 1484), | ||
193 | { .frequency = CPUFREQ_TABLE_END } | ||
194 | }; | ||
195 | #undef OP | ||
196 | |||
197 | #define _BANIAS(cpuid, max, name) \ | ||
198 | { .cpu_id = cpuid, \ | ||
199 | .model_name = "Intel(R) Pentium(R) M processor " name "MHz", \ | ||
200 | .max_freq = (max)*1000, \ | ||
201 | .op_points = banias_##max, \ | ||
202 | } | ||
203 | #define BANIAS(max) _BANIAS(&cpu_ids[CPU_BANIAS], max, #max) | ||
204 | |||
205 | /* CPU models, their operating frequency range, and freq/voltage | ||
206 | operating points */ | ||
207 | static struct cpu_model models[] = | ||
208 | { | ||
209 | _BANIAS(&cpu_ids[CPU_BANIAS], 900, " 900"), | ||
210 | BANIAS(1000), | ||
211 | BANIAS(1100), | ||
212 | BANIAS(1200), | ||
213 | BANIAS(1300), | ||
214 | BANIAS(1400), | ||
215 | BANIAS(1500), | ||
216 | BANIAS(1600), | ||
217 | BANIAS(1700), | ||
218 | |||
219 | /* NULL model_name is a wildcard */ | ||
220 | { &cpu_ids[CPU_DOTHAN_A1], NULL, 0, NULL }, | ||
221 | { &cpu_ids[CPU_DOTHAN_A2], NULL, 0, NULL }, | ||
222 | { &cpu_ids[CPU_DOTHAN_B0], NULL, 0, NULL }, | ||
223 | { &cpu_ids[CPU_MP4HT_D0], NULL, 0, NULL }, | ||
224 | { &cpu_ids[CPU_MP4HT_E0], NULL, 0, NULL }, | ||
225 | |||
226 | { NULL, } | ||
227 | }; | ||
228 | #undef _BANIAS | ||
229 | #undef BANIAS | ||
230 | |||
231 | static int centrino_cpu_init_table(struct cpufreq_policy *policy) | ||
232 | { | ||
233 | struct cpuinfo_x86 *cpu = &cpu_data[policy->cpu]; | ||
234 | struct cpu_model *model; | ||
235 | |||
236 | for(model = models; model->cpu_id != NULL; model++) | ||
237 | if (centrino_verify_cpu_id(cpu, model->cpu_id) && | ||
238 | (model->model_name == NULL || | ||
239 | strcmp(cpu->x86_model_id, model->model_name) == 0)) | ||
240 | break; | ||
241 | |||
242 | if (model->cpu_id == NULL) { | ||
243 | /* No match at all */ | ||
244 | dprintk("no support for CPU model \"%s\": " | ||
245 | "send /proc/cpuinfo to " MAINTAINER "\n", | ||
246 | cpu->x86_model_id); | ||
247 | return -ENOENT; | ||
248 | } | ||
249 | |||
250 | if (model->op_points == NULL) { | ||
251 | /* Matched a non-match */ | ||
252 | dprintk("no table support for CPU model \"%s\"\n", | ||
253 | cpu->x86_model_id); | ||
254 | dprintk("try using the acpi-cpufreq driver\n"); | ||
255 | return -ENOENT; | ||
256 | } | ||
257 | |||
258 | centrino_model[policy->cpu] = model; | ||
259 | |||
260 | dprintk("found \"%s\": max frequency: %dkHz\n", | ||
261 | model->model_name, model->max_freq); | ||
262 | |||
263 | return 0; | ||
264 | } | ||
265 | |||
266 | #else | ||
267 | static inline int centrino_cpu_init_table(struct cpufreq_policy *policy) { return -ENODEV; } | ||
268 | #endif /* CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE */ | ||
269 | |||
270 | static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, const struct cpu_id *x) | ||
271 | { | ||
272 | if ((c->x86 == x->x86) && | ||
273 | (c->x86_model == x->x86_model) && | ||
274 | (c->x86_mask == x->x86_mask)) | ||
275 | return 1; | ||
276 | return 0; | ||
277 | } | ||
278 | |||
279 | /* To be called only after centrino_model is initialized */ | ||
280 | static unsigned extract_clock(unsigned msr, unsigned int cpu, int failsafe) | ||
281 | { | ||
282 | int i; | ||
283 | |||
284 | /* | ||
285 | * Extract clock in kHz from PERF_CTL value | ||
286 | * for centrino, as some DSDTs are buggy. | ||
287 | * Ideally, this can be done using the acpi_data structure. | ||
288 | */ | ||
289 | if ((centrino_cpu[cpu] == &cpu_ids[CPU_BANIAS]) || | ||
290 | (centrino_cpu[cpu] == &cpu_ids[CPU_DOTHAN_A1]) || | ||
291 | (centrino_cpu[cpu] == &cpu_ids[CPU_DOTHAN_B0])) { | ||
292 | msr = (msr >> 8) & 0xff; | ||
293 | return msr * 100000; | ||
294 | } | ||
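	/* E.g. a PERF_STATUS value of 0x0a13 decodes, on these CPUs, to
	   ((0x0a13 >> 8) & 0xff) * 100000 = 1000000 kHz - the same 1 GHz
	   operating point encoded by OP(1000, 1004) in the Banias tables above. */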
295 | |||
296 | if ((!centrino_model[cpu]) || (!centrino_model[cpu]->op_points)) | ||
297 | return 0; | ||
298 | |||
299 | msr &= 0xffff; | ||
300 | for (i=0;centrino_model[cpu]->op_points[i].frequency != CPUFREQ_TABLE_END; i++) { | ||
301 | if (msr == centrino_model[cpu]->op_points[i].index) | ||
302 | return centrino_model[cpu]->op_points[i].frequency; | ||
303 | } | ||
304 | if (failsafe) | ||
305 | return centrino_model[cpu]->op_points[i-1].frequency; | ||
306 | else | ||
307 | return 0; | ||
308 | } | ||
309 | |||
310 | /* Return the current CPU frequency in kHz */ | ||
311 | static unsigned int get_cur_freq(unsigned int cpu) | ||
312 | { | ||
313 | unsigned l, h; | ||
314 | unsigned clock_freq; | ||
315 | cpumask_t saved_mask; | ||
316 | |||
317 | saved_mask = current->cpus_allowed; | ||
318 | set_cpus_allowed(current, cpumask_of_cpu(cpu)); | ||
319 | if (smp_processor_id() != cpu) | ||
320 | return 0; | ||
321 | |||
322 | rdmsr(MSR_IA32_PERF_STATUS, l, h); | ||
323 | clock_freq = extract_clock(l, cpu, 0); | ||
324 | |||
325 | if (unlikely(clock_freq == 0)) { | ||
326 | /* | ||
327 | * On some CPUs, we can see transient MSR values (which are | ||
328 | * not present in _PSS), while CPU is doing some automatic | ||
329 | * P-state transition (like TM2). Get the last freq set | ||
330 | * in PERF_CTL. | ||
331 | */ | ||
332 | rdmsr(MSR_IA32_PERF_CTL, l, h); | ||
333 | clock_freq = extract_clock(l, cpu, 1); | ||
334 | } | ||
335 | |||
336 | set_cpus_allowed(current, saved_mask); | ||
337 | return clock_freq; | ||
338 | } | ||
339 | |||
340 | |||
341 | static int centrino_cpu_init(struct cpufreq_policy *policy) | ||
342 | { | ||
343 | struct cpuinfo_x86 *cpu = &cpu_data[policy->cpu]; | ||
344 | unsigned freq; | ||
345 | unsigned l, h; | ||
346 | int ret; | ||
347 | int i; | ||
348 | |||
349 | /* Only Intel makes Enhanced Speedstep-capable CPUs */ | ||
350 | if (cpu->x86_vendor != X86_VENDOR_INTEL || !cpu_has(cpu, X86_FEATURE_EST)) | ||
351 | return -ENODEV; | ||
352 | |||
353 | if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) | ||
354 | centrino_driver.flags |= CPUFREQ_CONST_LOOPS; | ||
355 | |||
356 | if (policy->cpu != 0) | ||
357 | return -ENODEV; | ||
358 | |||
359 | for (i = 0; i < N_IDS; i++) | ||
360 | if (centrino_verify_cpu_id(cpu, &cpu_ids[i])) | ||
361 | break; | ||
362 | |||
363 | if (i != N_IDS) | ||
364 | centrino_cpu[policy->cpu] = &cpu_ids[i]; | ||
365 | |||
366 | if (!centrino_cpu[policy->cpu]) { | ||
367 | dprintk("found unsupported CPU with " | ||
368 | "Enhanced SpeedStep: send /proc/cpuinfo to " | ||
369 | MAINTAINER "\n"); | ||
370 | return -ENODEV; | ||
371 | } | ||
372 | |||
373 | if (centrino_cpu_init_table(policy)) { | ||
374 | return -ENODEV; | ||
375 | } | ||
376 | |||
377 | /* Check to see if Enhanced SpeedStep is enabled, and try to | ||
378 | enable it if not. */ | ||
379 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); | ||
380 | |||
381 | if (!(l & (1<<16))) { | ||
382 | l |= (1<<16); | ||
383 | dprintk("trying to enable Enhanced SpeedStep (%x)\n", l); | ||
384 | wrmsr(MSR_IA32_MISC_ENABLE, l, h); | ||
385 | |||
386 | /* check to see if it stuck */ | ||
387 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); | ||
388 | if (!(l & (1<<16))) { | ||
389 | printk(KERN_INFO PFX "couldn't enable Enhanced SpeedStep\n"); | ||
390 | return -ENODEV; | ||
391 | } | ||
392 | } | ||
393 | |||
394 | freq = get_cur_freq(policy->cpu); | ||
395 | |||
396 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
397 | policy->cpuinfo.transition_latency = 10000; /* 10uS transition latency */ | ||
398 | policy->cur = freq; | ||
399 | |||
400 | dprintk("centrino_cpu_init: cur=%dkHz\n", policy->cur); | ||
401 | |||
402 | ret = cpufreq_frequency_table_cpuinfo(policy, centrino_model[policy->cpu]->op_points); | ||
403 | if (ret) | ||
404 | return (ret); | ||
405 | |||
406 | cpufreq_frequency_table_get_attr(centrino_model[policy->cpu]->op_points, policy->cpu); | ||
407 | |||
408 | return 0; | ||
409 | } | ||
410 | |||
411 | static int centrino_cpu_exit(struct cpufreq_policy *policy) | ||
412 | { | ||
413 | unsigned int cpu = policy->cpu; | ||
414 | |||
415 | if (!centrino_model[cpu]) | ||
416 | return -ENODEV; | ||
417 | |||
418 | cpufreq_frequency_table_put_attr(cpu); | ||
419 | |||
420 | centrino_model[cpu] = NULL; | ||
421 | |||
422 | return 0; | ||
423 | } | ||
424 | |||
425 | /** | ||
426 | * centrino_verify - verifies a new CPUFreq policy | ||
427 | * @policy: new policy | ||
428 | * | ||
429 | * Limit must be within this model's frequency range, with at least one | ||
430 | * border included. | ||
431 | */ | ||
432 | static int centrino_verify (struct cpufreq_policy *policy) | ||
433 | { | ||
434 | return cpufreq_frequency_table_verify(policy, centrino_model[policy->cpu]->op_points); | ||
435 | } | ||
436 | |||
437 | /** | ||
438 | * centrino_target - set a new CPUFreq policy | ||
439 | * @policy: new policy | ||
440 | * @target_freq: the target frequency | ||
441 | * @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) | ||
442 | * | ||
443 | * Sets a new CPUFreq policy. | ||
444 | */ | ||
445 | static int centrino_target (struct cpufreq_policy *policy, | ||
446 | unsigned int target_freq, | ||
447 | unsigned int relation) | ||
448 | { | ||
449 | unsigned int newstate = 0; | ||
450 | unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu; | ||
451 | struct cpufreq_freqs freqs; | ||
452 | cpumask_t online_policy_cpus; | ||
453 | cpumask_t saved_mask; | ||
454 | cpumask_t set_mask; | ||
455 | cpumask_t covered_cpus; | ||
456 | int retval = 0; | ||
457 | unsigned int j, k, first_cpu, tmp; | ||
458 | |||
459 | if (unlikely(centrino_model[cpu] == NULL)) | ||
460 | return -ENODEV; | ||
461 | |||
462 | if (unlikely(cpufreq_frequency_table_target(policy, | ||
463 | centrino_model[cpu]->op_points, | ||
464 | target_freq, | ||
465 | relation, | ||
466 | &newstate))) { | ||
467 | return -EINVAL; | ||
468 | } | ||
469 | |||
470 | #ifdef CONFIG_HOTPLUG_CPU | ||
471 | /* cpufreq holds the hotplug lock, so we are safe from here on */ | ||
472 | cpus_and(online_policy_cpus, cpu_online_map, policy->cpus); | ||
473 | #else | ||
474 | online_policy_cpus = policy->cpus; | ||
475 | #endif | ||
476 | |||
477 | saved_mask = current->cpus_allowed; | ||
478 | first_cpu = 1; | ||
479 | cpus_clear(covered_cpus); | ||
480 | for_each_cpu_mask(j, online_policy_cpus) { | ||
481 | /* | ||
482 | * Support for SMP systems. | ||
483 | * Make sure we are running on CPU that wants to change freq | ||
484 | */ | ||
485 | cpus_clear(set_mask); | ||
486 | if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) | ||
487 | cpus_or(set_mask, set_mask, online_policy_cpus); | ||
488 | else | ||
489 | cpu_set(j, set_mask); | ||
490 | |||
491 | set_cpus_allowed(current, set_mask); | ||
492 | preempt_disable(); | ||
493 | if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) { | ||
494 | dprintk("couldn't limit to CPUs in this domain\n"); | ||
495 | retval = -EAGAIN; | ||
496 | if (first_cpu) { | ||
497 | /* We haven't started the transition yet. */ | ||
498 | goto migrate_end; | ||
499 | } | ||
500 | preempt_enable(); | ||
501 | break; | ||
502 | } | ||
503 | |||
504 | msr = centrino_model[cpu]->op_points[newstate].index; | ||
505 | |||
506 | if (first_cpu) { | ||
507 | rdmsr(MSR_IA32_PERF_CTL, oldmsr, h); | ||
508 | if (msr == (oldmsr & 0xffff)) { | ||
509 | dprintk("no change needed - msr was and needs " | ||
510 | "to be %x\n", oldmsr); | ||
511 | retval = 0; | ||
512 | goto migrate_end; | ||
513 | } | ||
514 | |||
515 | freqs.old = extract_clock(oldmsr, cpu, 0); | ||
516 | freqs.new = extract_clock(msr, cpu, 0); | ||
517 | |||
518 | dprintk("target=%dkHz old=%d new=%d msr=%04x\n", | ||
519 | target_freq, freqs.old, freqs.new, msr); | ||
520 | |||
521 | for_each_cpu_mask(k, online_policy_cpus) { | ||
522 | freqs.cpu = k; | ||
523 | cpufreq_notify_transition(&freqs, | ||
524 | CPUFREQ_PRECHANGE); | ||
525 | } | ||
526 | |||
527 | first_cpu = 0; | ||
528 | /* all but 16 LSB are reserved, treat them with care */ | ||
529 | oldmsr &= ~0xffff; | ||
530 | msr &= 0xffff; | ||
531 | oldmsr |= msr; | ||
532 | } | ||
533 | |||
534 | wrmsr(MSR_IA32_PERF_CTL, oldmsr, h); | ||
535 | if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { | ||
536 | preempt_enable(); | ||
537 | break; | ||
538 | } | ||
539 | |||
540 | cpu_set(j, covered_cpus); | ||
541 | preempt_enable(); | ||
542 | } | ||
543 | |||
544 | for_each_cpu_mask(k, online_policy_cpus) { | ||
545 | freqs.cpu = k; | ||
546 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
547 | } | ||
548 | |||
549 | if (unlikely(retval)) { | ||
550 | /* | ||
551 | * We have failed halfway through the frequency change. | ||
552 | * We have sent callbacks to policy->cpus and | ||
553 | * MSRs have already been written on covered_cpus. | ||
554 | * Best-effort undo. | ||
555 | */ | ||
556 | |||
557 | if (!cpus_empty(covered_cpus)) { | ||
558 | for_each_cpu_mask(j, covered_cpus) { | ||
559 | set_cpus_allowed(current, cpumask_of_cpu(j)); | ||
560 | wrmsr(MSR_IA32_PERF_CTL, oldmsr, h); | ||
561 | } | ||
562 | } | ||
563 | |||
564 | tmp = freqs.new; | ||
565 | freqs.new = freqs.old; | ||
566 | freqs.old = tmp; | ||
567 | for_each_cpu_mask(j, online_policy_cpus) { | ||
568 | freqs.cpu = j; | ||
569 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
570 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
571 | } | ||
572 | } | ||
573 | set_cpus_allowed(current, saved_mask); | ||
574 | return 0; | ||
575 | |||
576 | migrate_end: | ||
577 | preempt_enable(); | ||
578 | set_cpus_allowed(current, saved_mask); | ||
579 | return 0; | ||
580 | } | ||
581 | |||
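The MSR update in centrino_target() above only ever replaces the low 16 bits of MSR_IA32_PERF_CTL; everything above that is treated as reserved and written back unchanged. A minimal stand-alone sketch of that masking, with made-up values and not part of the patch:

/* Sketch of the PERF_CTL composition done in centrino_target(): only the
 * low 16 bits carry the requested operating point, the upper bits are
 * preserved as read. Input values below are invented for illustration. */
#include <stdint.h>
#include <stdio.h>

static uint32_t build_perf_ctl(uint32_t oldmsr, uint32_t op_point)
{
	oldmsr &= ~0xffffu;			/* keep reserved/upper bits */
	return oldmsr | (op_point & 0xffffu);	/* splice in the new state */
}

int main(void)
{
	printf("new PERF_CTL low dword: 0x%08x\n",
	       build_perf_ctl(0x00000a13, 0x00000613));
	return 0;
}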
582 | static struct freq_attr* centrino_attr[] = { | ||
583 | &cpufreq_freq_attr_scaling_available_freqs, | ||
584 | NULL, | ||
585 | }; | ||
586 | |||
587 | static struct cpufreq_driver centrino_driver = { | ||
588 | .name = "centrino", /* should be speedstep-centrino, | ||
589 | but there's a 16 char limit */ | ||
590 | .init = centrino_cpu_init, | ||
591 | .exit = centrino_cpu_exit, | ||
592 | .verify = centrino_verify, | ||
593 | .target = centrino_target, | ||
594 | .get = get_cur_freq, | ||
595 | .attr = centrino_attr, | ||
596 | .owner = THIS_MODULE, | ||
597 | }; | ||
598 | |||
599 | |||
600 | /** | ||
601 | * centrino_init - initializes the Enhanced SpeedStep CPUFreq driver | ||
602 | * | ||
603 | * Initializes the Enhanced SpeedStep support. Returns -ENODEV on | ||
604 | * unsupported devices, -ENOENT if there's no voltage table for this | ||
605 | * particular CPU model, -EINVAL on problems during initialization, | ||
606 | * and zero on success. | ||
607 | * | ||
608 | * This is quite picky. Not only does the CPU have to advertise the | ||
609 | * "est" flag in the cpuid capability flags, we look for a specific | ||
610 | * CPU model and stepping, and we need to have the exact model name in | ||
611 | * our voltage tables. That is, be paranoid about not releasing | ||
612 | * someone's valuable magic smoke. | ||
613 | */ | ||
614 | static int __init centrino_init(void) | ||
615 | { | ||
616 | struct cpuinfo_x86 *cpu = cpu_data; | ||
617 | |||
618 | if (!cpu_has(cpu, X86_FEATURE_EST)) | ||
619 | return -ENODEV; | ||
620 | |||
621 | return cpufreq_register_driver(¢rino_driver); | ||
622 | } | ||
623 | |||
624 | static void __exit centrino_exit(void) | ||
625 | { | ||
626 | cpufreq_unregister_driver(¢rino_driver); | ||
627 | } | ||
628 | |||
629 | MODULE_AUTHOR ("Jeremy Fitzhardinge <jeremy@goop.org>"); | ||
630 | MODULE_DESCRIPTION ("Enhanced SpeedStep driver for Intel Pentium M processors."); | ||
631 | MODULE_LICENSE ("GPL"); | ||
632 | |||
633 | late_initcall(centrino_init); | ||
634 | module_exit(centrino_exit); | ||
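centrino_cpu_init() above enables Enhanced SpeedStep by setting bit 16 of MSR_IA32_MISC_ENABLE (0x1a0) and re-reading the register to check that the write stuck. The same bit can be inspected from user space through the msr character device; a minimal sketch, not part of the patch, assuming the msr module is loaded and the program runs as root:

/* Read IA32_MISC_ENABLE (MSR 0x1a0) on CPU 0 via /dev/cpu/0/msr and report
 * whether bit 16 (Enhanced SpeedStep enable) is set. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t misc_enable;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}
	if (pread(fd, &misc_enable, sizeof(misc_enable), 0x1a0) != sizeof(misc_enable)) {
		perror("pread MSR 0x1a0");
		close(fd);
		return 1;
	}
	close(fd);
	printf("Enhanced SpeedStep is %s\n",
	       (misc_enable & (1ULL << 16)) ? "enabled" : "disabled");
	return 0;
}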
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c new file mode 100644 index 000000000000..a5b2346faf1f --- /dev/null +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c | |||
@@ -0,0 +1,440 @@ | |||
1 | /* | ||
2 | * (C) 2001 Dave Jones, Arjan van de ven. | ||
3 | * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> | ||
4 | * | ||
5 | * Licensed under the terms of the GNU GPL License version 2. | ||
6 | * Based upon reverse engineered information, and on Intel documentation | ||
7 | * for chipsets ICH2-M and ICH3-M. | ||
8 | * | ||
9 | * Many thanks to Ducrot Bruno for finding and fixing the last | ||
10 | * "missing link" for ICH2-M/ICH3-M support, and to Thomas Winkler | ||
11 | * for extensive testing. | ||
12 | * | ||
13 | * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* | ||
14 | */ | ||
15 | |||
16 | |||
17 | /********************************************************************* | ||
18 | * SPEEDSTEP - DEFINITIONS * | ||
19 | *********************************************************************/ | ||
20 | |||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/cpufreq.h> | ||
25 | #include <linux/pci.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/sched.h> | ||
28 | |||
29 | #include "speedstep-lib.h" | ||
30 | |||
31 | |||
32 | /* speedstep_chipset: | ||
33 | * It is necessary to know which chipset is used. As accesses to | ||
34 | * this device occur at various places in this module, we need a | ||
35 | * static struct pci_dev * pointing to that device. | ||
36 | */ | ||
37 | static struct pci_dev *speedstep_chipset_dev; | ||
38 | |||
39 | |||
40 | /* speedstep_processor | ||
41 | */ | ||
42 | static unsigned int speedstep_processor = 0; | ||
43 | |||
44 | static u32 pmbase; | ||
45 | |||
46 | /* | ||
47 | * There are only two frequency states for each processor. Values | ||
48 | * are in kHz for the time being. | ||
49 | */ | ||
50 | static struct cpufreq_frequency_table speedstep_freqs[] = { | ||
51 | {SPEEDSTEP_HIGH, 0}, | ||
52 | {SPEEDSTEP_LOW, 0}, | ||
53 | {0, CPUFREQ_TABLE_END}, | ||
54 | }; | ||
55 | |||
56 | |||
57 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-ich", msg) | ||
58 | |||
59 | |||
60 | /** | ||
61 | * speedstep_find_register - read the PMBASE address | ||
62 | * | ||
63 | * Returns: -ENODEV if no register could be found | ||
64 | */ | ||
65 | static int speedstep_find_register (void) | ||
66 | { | ||
67 | if (!speedstep_chipset_dev) | ||
68 | return -ENODEV; | ||
69 | |||
70 | /* get PMBASE */ | ||
71 | pci_read_config_dword(speedstep_chipset_dev, 0x40, &pmbase); | ||
72 | if (!(pmbase & 0x01)) { | ||
73 | printk(KERN_ERR "speedstep-ich: could not find speedstep register\n"); | ||
74 | return -ENODEV; | ||
75 | } | ||
76 | |||
77 | pmbase &= 0xFFFFFFFE; | ||
78 | if (!pmbase) { | ||
79 | printk(KERN_ERR "speedstep-ich: could not find speedstep register\n"); | ||
80 | return -ENODEV; | ||
81 | } | ||
82 | |||
83 | dprintk("pmbase is 0x%x\n", pmbase); | ||
84 | return 0; | ||
85 | } | ||
86 | |||
87 | /** | ||
88 | * speedstep_set_state - set the SpeedStep state | ||
89 | * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH) | ||
90 | * | ||
91 | * Tries to change the SpeedStep state. | ||
92 | */ | ||
93 | static void speedstep_set_state (unsigned int state) | ||
94 | { | ||
95 | u8 pm2_blk; | ||
96 | u8 value; | ||
97 | unsigned long flags; | ||
98 | |||
99 | if (state > 0x1) | ||
100 | return; | ||
101 | |||
102 | /* Disable IRQs */ | ||
103 | local_irq_save(flags); | ||
104 | |||
105 | /* read state */ | ||
106 | value = inb(pmbase + 0x50); | ||
107 | |||
108 | dprintk("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); | ||
109 | |||
110 | /* write new state */ | ||
111 | value &= 0xFE; | ||
112 | value |= state; | ||
113 | |||
114 | dprintk("writing 0x%x to pmbase 0x%x + 0x50\n", value, pmbase); | ||
115 | |||
116 | /* Disable bus master arbitration */ | ||
117 | pm2_blk = inb(pmbase + 0x20); | ||
118 | pm2_blk |= 0x01; | ||
119 | outb(pm2_blk, (pmbase + 0x20)); | ||
120 | |||
121 | /* Actual transition */ | ||
122 | outb(value, (pmbase + 0x50)); | ||
123 | |||
124 | /* Restore bus master arbitration */ | ||
125 | pm2_blk &= 0xfe; | ||
126 | outb(pm2_blk, (pmbase + 0x20)); | ||
127 | |||
128 | /* check if transition was successful */ | ||
129 | value = inb(pmbase + 0x50); | ||
130 | |||
131 | /* Enable IRQs */ | ||
132 | local_irq_restore(flags); | ||
133 | |||
134 | dprintk("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); | ||
135 | |||
136 | if (state == (value & 0x1)) { | ||
137 | dprintk("change to %u MHz succeeded\n", (speedstep_get_processor_frequency(speedstep_processor) / 1000)); | ||
138 | } else { | ||
139 | printk (KERN_ERR "cpufreq: change failed - I/O error\n"); | ||
140 | } | ||
141 | |||
142 | return; | ||
143 | } | ||
144 | |||
145 | |||
146 | /** | ||
147 | * speedstep_activate - activate SpeedStep control in the chipset | ||
148 | * | ||
149 | * Tries to activate the SpeedStep status and control registers. | ||
150 | * Returns -EINVAL on an unsupported chipset, and zero on success. | ||
151 | */ | ||
152 | static int speedstep_activate (void) | ||
153 | { | ||
154 | u16 value = 0; | ||
155 | |||
156 | if (!speedstep_chipset_dev) | ||
157 | return -EINVAL; | ||
158 | |||
159 | pci_read_config_word(speedstep_chipset_dev, 0x00A0, &value); | ||
160 | if (!(value & 0x08)) { | ||
161 | value |= 0x08; | ||
162 | dprintk("activating SpeedStep (TM) registers\n"); | ||
163 | pci_write_config_word(speedstep_chipset_dev, 0x00A0, value); | ||
164 | } | ||
165 | |||
166 | return 0; | ||
167 | } | ||
168 | |||
169 | |||
170 | /** | ||
171 | * speedstep_detect_chipset - detect the Southbridge which contains SpeedStep logic | ||
172 | * | ||
173 | * Detects ICH2-M, ICH3-M and ICH4-M so far. The pci_dev points to | ||
174 | * the LPC bridge / PM module which contains all power-management | ||
175 | * functions. Returns the SPEEDSTEP_CHIPSET_-number for the detected | ||
176 | * chipset, or zero on failure. | ||
177 | */ | ||
178 | static unsigned int speedstep_detect_chipset (void) | ||
179 | { | ||
180 | speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, | ||
181 | PCI_DEVICE_ID_INTEL_82801DB_12, | ||
182 | PCI_ANY_ID, | ||
183 | PCI_ANY_ID, | ||
184 | NULL); | ||
185 | if (speedstep_chipset_dev) | ||
186 | return 4; /* 4-M */ | ||
187 | |||
188 | speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, | ||
189 | PCI_DEVICE_ID_INTEL_82801CA_12, | ||
190 | PCI_ANY_ID, | ||
191 | PCI_ANY_ID, | ||
192 | NULL); | ||
193 | if (speedstep_chipset_dev) | ||
194 | return 3; /* 3-M */ | ||
195 | |||
196 | |||
197 | speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, | ||
198 | PCI_DEVICE_ID_INTEL_82801BA_10, | ||
199 | PCI_ANY_ID, | ||
200 | PCI_ANY_ID, | ||
201 | NULL); | ||
202 | if (speedstep_chipset_dev) { | ||
203 | /* speedstep.c causes lockups on Dell Inspirons 8000 and | ||
204 | * 8100 which use a pretty old revision of the 82815 | ||
205 | * host bridge. Abort on these systems. | ||
206 | */ | ||
207 | static struct pci_dev *hostbridge; | ||
208 | |||
209 | hostbridge = pci_get_subsys(PCI_VENDOR_ID_INTEL, | ||
210 | PCI_DEVICE_ID_INTEL_82815_MC, | ||
211 | PCI_ANY_ID, | ||
212 | PCI_ANY_ID, | ||
213 | NULL); | ||
214 | |||
215 | if (!hostbridge) | ||
216 | return 2; /* 2-M */ | ||
217 | |||
218 | if (hostbridge->revision < 5) { | ||
219 | dprintk("hostbridge does not support speedstep\n"); | ||
220 | speedstep_chipset_dev = NULL; | ||
221 | pci_dev_put(hostbridge); | ||
222 | return 0; | ||
223 | } | ||
224 | |||
225 | pci_dev_put(hostbridge); | ||
226 | return 2; /* 2-M */ | ||
227 | } | ||
228 | |||
229 | return 0; | ||
230 | } | ||
231 | |||
232 | static unsigned int _speedstep_get(cpumask_t cpus) | ||
233 | { | ||
234 | unsigned int speed; | ||
235 | cpumask_t cpus_allowed; | ||
236 | |||
237 | cpus_allowed = current->cpus_allowed; | ||
238 | set_cpus_allowed(current, cpus); | ||
239 | speed = speedstep_get_processor_frequency(speedstep_processor); | ||
240 | set_cpus_allowed(current, cpus_allowed); | ||
241 | dprintk("detected %u kHz as current frequency\n", speed); | ||
242 | return speed; | ||
243 | } | ||
244 | |||
245 | static unsigned int speedstep_get(unsigned int cpu) | ||
246 | { | ||
247 | return _speedstep_get(cpumask_of_cpu(cpu)); | ||
248 | } | ||
249 | |||
250 | /** | ||
251 | * speedstep_target - set a new CPUFreq policy | ||
252 | * @policy: new policy | ||
253 | * @target_freq: the target frequency | ||
254 | * @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) | ||
255 | * | ||
256 | * Sets a new CPUFreq policy. | ||
257 | */ | ||
258 | static int speedstep_target (struct cpufreq_policy *policy, | ||
259 | unsigned int target_freq, | ||
260 | unsigned int relation) | ||
261 | { | ||
262 | unsigned int newstate = 0; | ||
263 | struct cpufreq_freqs freqs; | ||
264 | cpumask_t cpus_allowed; | ||
265 | int i; | ||
266 | |||
267 | if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate)) | ||
268 | return -EINVAL; | ||
269 | |||
270 | freqs.old = _speedstep_get(policy->cpus); | ||
271 | freqs.new = speedstep_freqs[newstate].frequency; | ||
272 | freqs.cpu = policy->cpu; | ||
273 | |||
274 | dprintk("transiting from %u to %u kHz\n", freqs.old, freqs.new); | ||
275 | |||
276 | /* no transition necessary */ | ||
277 | if (freqs.old == freqs.new) | ||
278 | return 0; | ||
279 | |||
280 | cpus_allowed = current->cpus_allowed; | ||
281 | |||
282 | for_each_cpu_mask(i, policy->cpus) { | ||
283 | freqs.cpu = i; | ||
284 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
285 | } | ||
286 | |||
287 | /* switch to physical CPU where state is to be changed */ | ||
288 | set_cpus_allowed(current, policy->cpus); | ||
289 | |||
290 | speedstep_set_state(newstate); | ||
291 | |||
292 | /* allow to be run on all CPUs */ | ||
293 | set_cpus_allowed(current, cpus_allowed); | ||
294 | |||
295 | for_each_cpu_mask(i, policy->cpus) { | ||
296 | freqs.cpu = i; | ||
297 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
298 | } | ||
299 | |||
300 | return 0; | ||
301 | } | ||
302 | |||
303 | |||
304 | /** | ||
305 | * speedstep_verify - verifies a new CPUFreq policy | ||
306 | * @policy: new policy | ||
307 | * | ||
308 | * Limit must be within speedstep_low_freq and speedstep_high_freq, with | ||
309 | * at least one border included. | ||
310 | */ | ||
311 | static int speedstep_verify (struct cpufreq_policy *policy) | ||
312 | { | ||
313 | return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]); | ||
314 | } | ||
315 | |||
316 | |||
317 | static int speedstep_cpu_init(struct cpufreq_policy *policy) | ||
318 | { | ||
319 | int result = 0; | ||
320 | unsigned int speed; | ||
321 | cpumask_t cpus_allowed; | ||
322 | |||
323 | /* only run on CPU to be set, or on its sibling */ | ||
324 | #ifdef CONFIG_SMP | ||
325 | policy->cpus = cpu_sibling_map[policy->cpu]; | ||
326 | #endif | ||
327 | |||
328 | cpus_allowed = current->cpus_allowed; | ||
329 | set_cpus_allowed(current, policy->cpus); | ||
330 | |||
331 | /* detect low and high frequency and transition latency */ | ||
332 | result = speedstep_get_freqs(speedstep_processor, | ||
333 | &speedstep_freqs[SPEEDSTEP_LOW].frequency, | ||
334 | &speedstep_freqs[SPEEDSTEP_HIGH].frequency, | ||
335 | &policy->cpuinfo.transition_latency, | ||
336 | &speedstep_set_state); | ||
337 | set_cpus_allowed(current, cpus_allowed); | ||
338 | if (result) | ||
339 | return result; | ||
340 | |||
341 | /* get current speed setting */ | ||
342 | speed = _speedstep_get(policy->cpus); | ||
343 | if (!speed) | ||
344 | return -EIO; | ||
345 | |||
346 | dprintk("currently at %s speed setting - %i MHz\n", | ||
347 | (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) ? "low" : "high", | ||
348 | (speed / 1000)); | ||
349 | |||
350 | /* cpuinfo and default policy values */ | ||
351 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
352 | policy->cur = speed; | ||
353 | |||
354 | result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs); | ||
355 | if (result) | ||
356 | return (result); | ||
357 | |||
358 | cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu); | ||
359 | |||
360 | return 0; | ||
361 | } | ||
362 | |||
363 | |||
364 | static int speedstep_cpu_exit(struct cpufreq_policy *policy) | ||
365 | { | ||
366 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
367 | return 0; | ||
368 | } | ||
369 | |||
370 | static struct freq_attr* speedstep_attr[] = { | ||
371 | &cpufreq_freq_attr_scaling_available_freqs, | ||
372 | NULL, | ||
373 | }; | ||
374 | |||
375 | |||
376 | static struct cpufreq_driver speedstep_driver = { | ||
377 | .name = "speedstep-ich", | ||
378 | .verify = speedstep_verify, | ||
379 | .target = speedstep_target, | ||
380 | .init = speedstep_cpu_init, | ||
381 | .exit = speedstep_cpu_exit, | ||
382 | .get = speedstep_get, | ||
383 | .owner = THIS_MODULE, | ||
384 | .attr = speedstep_attr, | ||
385 | }; | ||
386 | |||
387 | |||
388 | /** | ||
389 | * speedstep_init - initializes the SpeedStep CPUFreq driver | ||
390 | * | ||
391 | * Initializes the SpeedStep support. Returns -ENODEV on unsupported | ||
392 | * devices, -EINVAL on problems during initialization, and zero on | ||
393 | * success. | ||
394 | */ | ||
395 | static int __init speedstep_init(void) | ||
396 | { | ||
397 | /* detect processor */ | ||
398 | speedstep_processor = speedstep_detect_processor(); | ||
399 | if (!speedstep_processor) { | ||
400 | dprintk("Intel(R) SpeedStep(TM) capable processor not found\n"); | ||
401 | return -ENODEV; | ||
402 | } | ||
403 | |||
404 | /* detect chipset */ | ||
405 | if (!speedstep_detect_chipset()) { | ||
406 | dprintk("Intel(R) SpeedStep(TM) for this chipset not (yet) available.\n"); | ||
407 | return -ENODEV; | ||
408 | } | ||
409 | |||
410 | /* activate speedstep support */ | ||
411 | if (speedstep_activate()) { | ||
412 | pci_dev_put(speedstep_chipset_dev); | ||
413 | return -EINVAL; | ||
414 | } | ||
415 | |||
416 | if (speedstep_find_register()) | ||
417 | return -ENODEV; | ||
418 | |||
419 | return cpufreq_register_driver(&speedstep_driver); | ||
420 | } | ||
421 | |||
422 | |||
423 | /** | ||
424 | * speedstep_exit - unregisters SpeedStep support | ||
425 | * | ||
426 | * Unregisters SpeedStep support. | ||
427 | */ | ||
428 | static void __exit speedstep_exit(void) | ||
429 | { | ||
430 | pci_dev_put(speedstep_chipset_dev); | ||
431 | cpufreq_unregister_driver(&speedstep_driver); | ||
432 | } | ||
433 | |||
434 | |||
435 | MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>, Dominik Brodowski <linux@brodo.de>"); | ||
436 | MODULE_DESCRIPTION ("Speedstep driver for Intel mobile processors on chipsets with ICH-M southbridges."); | ||
437 | MODULE_LICENSE ("GPL"); | ||
438 | |||
439 | module_init(speedstep_init); | ||
440 | module_exit(speedstep_exit); | ||
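speedstep_set_state() above performs the actual transition by rewriting bit 0 of the byte at PMBASE + 0x50, with bus-master arbitration disabled through PMBASE + 0x20 around the write. The current state bit can be read back the same way from user space; a minimal sketch, not part of the patch, which assumes PMBASE has already been read from the LPC bridge's config register 0x40 (the 0x1000 below is only a placeholder) and that the program runs as root:

/* Read the SpeedStep state bit at PMBASE + 0x50. PM I/O space usually sits
 * above port 0x3ff, so iopl(3) is used instead of ioperm(). */
#include <stdio.h>
#include <sys/io.h>

#define PMBASE 0x1000	/* placeholder -- substitute the value from the LPC bridge */

int main(void)
{
	unsigned char value;

	if (iopl(3)) {
		perror("iopl");
		return 1;
	}
	value = inb(PMBASE + 0x50);
	/* SPEEDSTEP_HIGH is 0, SPEEDSTEP_LOW is 1 */
	printf("state bit = %u (%s)\n", value & 1, (value & 1) ? "low" : "high");
	return 0;
}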
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c new file mode 100644 index 000000000000..b1acc8ce3167 --- /dev/null +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c | |||
@@ -0,0 +1,444 @@ | |||
1 | /* | ||
2 | * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> | ||
3 | * | ||
4 | * Licensed under the terms of the GNU GPL License version 2. | ||
5 | * | ||
6 | * Library for common functions for Intel SpeedStep v.1 and v.2 support | ||
7 | * | ||
8 | * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* | ||
9 | */ | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/moduleparam.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/cpufreq.h> | ||
16 | #include <linux/slab.h> | ||
17 | |||
18 | #include <asm/msr.h> | ||
19 | #include "speedstep-lib.h" | ||
20 | |||
21 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-lib", msg) | ||
22 | |||
23 | #ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK | ||
24 | static int relaxed_check = 0; | ||
25 | #else | ||
26 | #define relaxed_check 0 | ||
27 | #endif | ||
28 | |||
29 | /********************************************************************* | ||
30 | * GET PROCESSOR CORE SPEED IN KHZ * | ||
31 | *********************************************************************/ | ||
32 | |||
33 | static unsigned int pentium3_get_frequency (unsigned int processor) | ||
34 | { | ||
35 | /* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */ | ||
36 | struct { | ||
37 | unsigned int ratio; /* Frequency Multiplier (x10) */ | ||
38 | u8 bitmap; /* power on configuration bits | ||
39 | [27, 25:22] (in MSR 0x2a) */ | ||
40 | } msr_decode_mult [] = { | ||
41 | { 30, 0x01 }, | ||
42 | { 35, 0x05 }, | ||
43 | { 40, 0x02 }, | ||
44 | { 45, 0x06 }, | ||
45 | { 50, 0x00 }, | ||
46 | { 55, 0x04 }, | ||
47 | { 60, 0x0b }, | ||
48 | { 65, 0x0f }, | ||
49 | { 70, 0x09 }, | ||
50 | { 75, 0x0d }, | ||
51 | { 80, 0x0a }, | ||
52 | { 85, 0x26 }, | ||
53 | { 90, 0x20 }, | ||
54 | { 100, 0x2b }, | ||
55 | { 0, 0xff } /* error or unknown value */ | ||
56 | }; | ||
57 | |||
58 | /* PIII(-M) FSB settings: see table b1-b of 24547206.pdf */ | ||
59 | struct { | ||
60 | unsigned int value; /* Front Side Bus speed in MHz */ | ||
61 | u8 bitmap; /* power on configuration bits [18: 19] | ||
62 | (in MSR 0x2a) */ | ||
63 | } msr_decode_fsb [] = { | ||
64 | { 66, 0x0 }, | ||
65 | { 100, 0x2 }, | ||
66 | { 133, 0x1 }, | ||
67 | { 0, 0xff} | ||
68 | }; | ||
69 | |||
70 | u32 msr_lo, msr_tmp; | ||
71 | int i = 0, j = 0; | ||
72 | |||
73 | /* read MSR 0x2a - we only need the low 32 bits */ | ||
74 | rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); | ||
75 | dprintk("P3 - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); | ||
76 | msr_tmp = msr_lo; | ||
77 | |||
78 | /* decode the FSB */ | ||
79 | msr_tmp &= 0x00c0000; | ||
80 | msr_tmp >>= 18; | ||
81 | while (msr_tmp != msr_decode_fsb[i].bitmap) { | ||
82 | if (msr_decode_fsb[i].bitmap == 0xff) | ||
83 | return 0; | ||
84 | i++; | ||
85 | } | ||
86 | |||
87 | /* decode the multiplier */ | ||
88 | if (processor == SPEEDSTEP_PROCESSOR_PIII_C_EARLY) { | ||
89 | dprintk("workaround for early PIIIs\n"); | ||
90 | msr_lo &= 0x03c00000; | ||
91 | } else | ||
92 | msr_lo &= 0x0bc00000; | ||
93 | msr_lo >>= 22; | ||
94 | while (msr_lo != msr_decode_mult[j].bitmap) { | ||
95 | if (msr_decode_mult[j].bitmap == 0xff) | ||
96 | return 0; | ||
97 | j++; | ||
98 | } | ||
99 | |||
100 | dprintk("speed is %u\n", (msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100)); | ||
101 | |||
102 | return (msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100); | ||
103 | } | ||
104 | |||
105 | |||
106 | static unsigned int pentiumM_get_frequency(void) | ||
107 | { | ||
108 | u32 msr_lo, msr_tmp; | ||
109 | |||
110 | rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); | ||
111 | dprintk("PM - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); | ||
112 | |||
113 | /* see table B-2 of 24547212.pdf */ | ||
114 | if (msr_lo & 0x00040000) { | ||
115 | printk(KERN_DEBUG "speedstep-lib: PM - invalid FSB: 0x%x 0x%x\n", msr_lo, msr_tmp); | ||
116 | return 0; | ||
117 | } | ||
118 | |||
119 | msr_tmp = (msr_lo >> 22) & 0x1f; | ||
120 | dprintk("bits 22-26 are 0x%x, speed is %u\n", msr_tmp, (msr_tmp * 100 * 1000)); | ||
121 | |||
122 | return (msr_tmp * 100 * 1000); | ||
123 | } | ||
124 | |||
125 | static unsigned int pentium_core_get_frequency(void) | ||
126 | { | ||
127 | u32 fsb = 0; | ||
128 | u32 msr_lo, msr_tmp; | ||
129 | |||
130 | rdmsr(MSR_FSB_FREQ, msr_lo, msr_tmp); | ||
131 | /* see table B-2 of 25366920.pdf */ | ||
132 | switch (msr_lo & 0x07) { | ||
133 | case 5: | ||
134 | fsb = 100000; | ||
135 | break; | ||
136 | case 1: | ||
137 | fsb = 133333; | ||
138 | break; | ||
139 | case 3: | ||
140 | fsb = 166667; | ||
141 | break; | ||
142 | default: | ||
143 | printk(KERN_ERR "PCORE - MSR_FSB_FREQ undefined value"); | ||
144 | } | ||
145 | |||
146 | rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); | ||
147 | dprintk("PCORE - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); | ||
148 | |||
149 | msr_tmp = (msr_lo >> 22) & 0x1f; | ||
150 | dprintk("bits 22-26 are 0x%x, speed is %u\n", msr_tmp, (msr_tmp * fsb)); | ||
151 | |||
152 | return (msr_tmp * fsb); | ||
153 | } | ||
154 | |||
155 | |||
156 | static unsigned int pentium4_get_frequency(void) | ||
157 | { | ||
158 | struct cpuinfo_x86 *c = &boot_cpu_data; | ||
159 | u32 msr_lo, msr_hi, mult; | ||
160 | unsigned int fsb = 0; | ||
161 | |||
162 | rdmsr(0x2c, msr_lo, msr_hi); | ||
163 | |||
164 | dprintk("P4 - MSR_EBC_FREQUENCY_ID: 0x%x 0x%x\n", msr_lo, msr_hi); | ||
165 | |||
166 | /* decode the FSB: see IA-32 Intel (C) Architecture Software | ||
167 | * Developer's Manual, Volume 3: System Programming Guide, | ||
168 | * revision #12 in Table B-1: MSRs in the Pentium 4 and | ||
169 | * Intel Xeon Processors, on page B-4 and B-5. | ||
170 | */ | ||
171 | if (c->x86_model < 2) | ||
172 | fsb = 100 * 1000; | ||
173 | else { | ||
174 | u8 fsb_code = (msr_lo >> 16) & 0x7; | ||
175 | switch (fsb_code) { | ||
176 | case 0: | ||
177 | fsb = 100 * 1000; | ||
178 | break; | ||
179 | case 1: | ||
180 | fsb = 13333 * 10; | ||
181 | break; | ||
182 | case 2: | ||
183 | fsb = 200 * 1000; | ||
184 | break; | ||
185 | } | ||
186 | } | ||
187 | |||
188 | if (!fsb) | ||
189 | printk(KERN_DEBUG "speedstep-lib: couldn't detect FSB speed. Please send an e-mail to <linux@brodo.de>\n"); | ||
190 | |||
191 | /* Multiplier. */ | ||
192 | if (c->x86_model < 2) | ||
193 | mult = msr_lo >> 27; | ||
194 | else | ||
195 | mult = msr_lo >> 24; | ||
196 | |||
197 | dprintk("P4 - FSB %u kHz; Multiplier %u; Speed %u kHz\n", fsb, mult, (fsb * mult)); | ||
198 | |||
199 | return (fsb * mult); | ||
200 | } | ||
201 | |||
202 | |||
203 | unsigned int speedstep_get_processor_frequency(unsigned int processor) | ||
204 | { | ||
205 | switch (processor) { | ||
206 | case SPEEDSTEP_PROCESSOR_PCORE: | ||
207 | return pentium_core_get_frequency(); | ||
208 | case SPEEDSTEP_PROCESSOR_PM: | ||
209 | return pentiumM_get_frequency(); | ||
210 | case SPEEDSTEP_PROCESSOR_P4D: | ||
211 | case SPEEDSTEP_PROCESSOR_P4M: | ||
212 | return pentium4_get_frequency(); | ||
213 | case SPEEDSTEP_PROCESSOR_PIII_T: | ||
214 | case SPEEDSTEP_PROCESSOR_PIII_C: | ||
215 | case SPEEDSTEP_PROCESSOR_PIII_C_EARLY: | ||
216 | return pentium3_get_frequency(processor); | ||
217 | default: | ||
218 | return 0; | ||
219 | }; | ||
220 | return 0; | ||
221 | } | ||
222 | EXPORT_SYMBOL_GPL(speedstep_get_processor_frequency); | ||
223 | |||
224 | |||
225 | /********************************************************************* | ||
226 | * DETECT SPEEDSTEP-CAPABLE PROCESSOR * | ||
227 | *********************************************************************/ | ||
228 | |||
229 | unsigned int speedstep_detect_processor (void) | ||
230 | { | ||
231 | struct cpuinfo_x86 *c = cpu_data; | ||
232 | u32 ebx, msr_lo, msr_hi; | ||
233 | |||
234 | dprintk("x86: %x, model: %x\n", c->x86, c->x86_model); | ||
235 | |||
236 | if ((c->x86_vendor != X86_VENDOR_INTEL) || | ||
237 | ((c->x86 != 6) && (c->x86 != 0xF))) | ||
238 | return 0; | ||
239 | |||
240 | if (c->x86 == 0xF) { | ||
241 | /* Intel Mobile Pentium 4-M | ||
242 | * or Intel Mobile Pentium 4 with 533 MHz FSB */ | ||
243 | if (c->x86_model != 2) | ||
244 | return 0; | ||
245 | |||
246 | ebx = cpuid_ebx(0x00000001); | ||
247 | ebx &= 0x000000FF; | ||
248 | |||
249 | dprintk("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask); | ||
250 | |||
251 | switch (c->x86_mask) { | ||
252 | case 4: | ||
253 | /* | ||
254 | * B-stepping [M-P4-M] | ||
255 | * sample has ebx = 0x0f, production has 0x0e. | ||
256 | */ | ||
257 | if ((ebx == 0x0e) || (ebx == 0x0f)) | ||
258 | return SPEEDSTEP_PROCESSOR_P4M; | ||
259 | break; | ||
260 | case 7: | ||
261 | /* | ||
262 | * C-stepping [M-P4-M] | ||
263 | * needs to have ebx=0x0e, else it's a celeron: | ||
264 | * cf. 25130917.pdf / page 7, footnote 5 even | ||
265 | * though 25072120.pdf / page 7 doesn't say | ||
266 | * samples are only of B-stepping... | ||
267 | */ | ||
268 | if (ebx == 0x0e) | ||
269 | return SPEEDSTEP_PROCESSOR_P4M; | ||
270 | break; | ||
271 | case 9: | ||
272 | /* | ||
273 | * D-stepping [M-P4-M or M-P4/533] | ||
274 | * | ||
275 | * this is totally strange: CPUID 0x0F29 is | ||
276 | * used by M-P4-M, M-P4/533 and(!) Celeron CPUs. | ||
277 | * The latter need to be sorted out as they don't | ||
278 | * support speedstep. | ||
279 | * Celerons with CPUID 0x0F29 may have either | ||
280 | * ebx=0x8 or 0xf -- 25130917.pdf doesn't say anything | ||
281 | * specific. | ||
282 | * M-P4-Ms may have either ebx=0xe or 0xf [see above] | ||
283 | * M-P4/533 have either ebx=0xe or 0xf. [25317607.pdf] | ||
284 | * also, M-P4M HTs have ebx=0x8, too | ||
285 | * For now, they are distinguished by the model_id string | ||
286 | */ | ||
287 | if ((ebx == 0x0e) || (strstr(c->x86_model_id,"Mobile Intel(R) Pentium(R) 4") != NULL)) | ||
288 | return SPEEDSTEP_PROCESSOR_P4M; | ||
289 | break; | ||
290 | default: | ||
291 | break; | ||
292 | } | ||
293 | return 0; | ||
294 | } | ||
295 | |||
296 | switch (c->x86_model) { | ||
297 | case 0x0B: /* Intel PIII [Tualatin] */ | ||
298 | /* cpuid_ebx(1) is 0x04 for desktop PIII, 0x06 for mobile PIII-M */ | ||
299 | ebx = cpuid_ebx(0x00000001); | ||
300 | dprintk("ebx is %x\n", ebx); | ||
301 | |||
302 | ebx &= 0x000000FF; | ||
303 | |||
304 | if (ebx != 0x06) | ||
305 | return 0; | ||
306 | |||
307 | /* So far all PIII-M processors support SpeedStep. See | ||
308 | * Intel's 24540640.pdf of June 2003 | ||
309 | */ | ||
310 | return SPEEDSTEP_PROCESSOR_PIII_T; | ||
311 | |||
312 | case 0x08: /* Intel PIII [Coppermine] */ | ||
313 | |||
314 | /* all mobile PIII Coppermines have FSB 100 MHz | ||
315 | * ==> sort out a few desktop PIIIs. */ | ||
316 | rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_hi); | ||
317 | dprintk("Coppermine: MSR_IA32_EBL_CR_POWERON is 0x%x, 0x%x\n", msr_lo, msr_hi); | ||
318 | msr_lo &= 0x00c0000; | ||
319 | if (msr_lo != 0x0080000) | ||
320 | return 0; | ||
321 | |||
322 | /* | ||
323 | * If the processor is a mobile version, | ||
324 | * platform ID has bit 50 set; | ||
325 | * it has SpeedStep technology if either | ||
326 | * bit 56 or 57 is set. | ||
327 | */ | ||
328 | rdmsr(MSR_IA32_PLATFORM_ID, msr_lo, msr_hi); | ||
329 | dprintk("Coppermine: MSR_IA32_PLATFORM ID is 0x%x, 0x%x\n", msr_lo, msr_hi); | ||
330 | if ((msr_hi & (1<<18)) && (relaxed_check ? 1 : (msr_hi & (3<<24)))) { | ||
331 | if (c->x86_mask == 0x01) { | ||
332 | dprintk("early PIII version\n"); | ||
333 | return SPEEDSTEP_PROCESSOR_PIII_C_EARLY; | ||
334 | } else | ||
335 | return SPEEDSTEP_PROCESSOR_PIII_C; | ||
336 | } | ||
337 | |||
338 | default: | ||
339 | return 0; | ||
340 | } | ||
341 | } | ||
342 | EXPORT_SYMBOL_GPL(speedstep_detect_processor); | ||
343 | |||
344 | |||
345 | /********************************************************************* | ||
346 | * DETECT SPEEDSTEP SPEEDS * | ||
347 | *********************************************************************/ | ||
348 | |||
349 | unsigned int speedstep_get_freqs(unsigned int processor, | ||
350 | unsigned int *low_speed, | ||
351 | unsigned int *high_speed, | ||
352 | unsigned int *transition_latency, | ||
353 | void (*set_state) (unsigned int state)) | ||
354 | { | ||
355 | unsigned int prev_speed; | ||
356 | unsigned int ret = 0; | ||
357 | unsigned long flags; | ||
358 | struct timeval tv1, tv2; | ||
359 | |||
360 | if ((!processor) || (!low_speed) || (!high_speed) || (!set_state)) | ||
361 | return -EINVAL; | ||
362 | |||
363 | dprintk("trying to determine both speeds\n"); | ||
364 | |||
365 | /* get current speed */ | ||
366 | prev_speed = speedstep_get_processor_frequency(processor); | ||
367 | if (!prev_speed) | ||
368 | return -EIO; | ||
369 | |||
370 | dprintk("previous speed is %u\n", prev_speed); | ||
371 | |||
372 | local_irq_save(flags); | ||
373 | |||
374 | /* switch to low state */ | ||
375 | set_state(SPEEDSTEP_LOW); | ||
376 | *low_speed = speedstep_get_processor_frequency(processor); | ||
377 | if (!*low_speed) { | ||
378 | ret = -EIO; | ||
379 | goto out; | ||
380 | } | ||
381 | |||
382 | dprintk("low speed is %u\n", *low_speed); | ||
383 | |||
384 | /* start latency measurement */ | ||
385 | if (transition_latency) | ||
386 | do_gettimeofday(&tv1); | ||
387 | |||
388 | /* switch to high state */ | ||
389 | set_state(SPEEDSTEP_HIGH); | ||
390 | |||
391 | /* end latency measurement */ | ||
392 | if (transition_latency) | ||
393 | do_gettimeofday(&tv2); | ||
394 | |||
395 | *high_speed = speedstep_get_processor_frequency(processor); | ||
396 | if (!*high_speed) { | ||
397 | ret = -EIO; | ||
398 | goto out; | ||
399 | } | ||
400 | |||
401 | dprintk("high speed is %u\n", *high_speed); | ||
402 | |||
403 | if (*low_speed == *high_speed) { | ||
404 | ret = -ENODEV; | ||
405 | goto out; | ||
406 | } | ||
407 | |||
408 | /* switch to previous state, if necessary */ | ||
409 | if (*high_speed != prev_speed) | ||
410 | set_state(SPEEDSTEP_LOW); | ||
411 | |||
412 | if (transition_latency) { | ||
413 | *transition_latency = (tv2.tv_sec - tv1.tv_sec) * USEC_PER_SEC + | ||
414 | tv2.tv_usec - tv1.tv_usec; | ||
415 | dprintk("transition latency is %u uSec\n", *transition_latency); | ||
416 | |||
417 | /* convert uSec to nSec and add 20% for safety reasons */ | ||
418 | *transition_latency *= 1200; | ||
419 | |||
420 | /* check if the latency measurement is too high or too low | ||
421 | * and set it to a safe value (500uSec) in that case | ||
422 | */ | ||
423 | if (*transition_latency > 10000000 || *transition_latency < 50000) { | ||
424 | printk (KERN_WARNING "speedstep: measured frequency transition latency seems out of " | ||
425 | "range (%u nSec), falling back to a safe value of %u nSec.\n", | ||
426 | *transition_latency, 500000); | ||
427 | *transition_latency = 500000; | ||
428 | } | ||
429 | } | ||
430 | |||
431 | out: | ||
432 | local_irq_restore(flags); | ||
433 | return (ret); | ||
434 | } | ||
435 | EXPORT_SYMBOL_GPL(speedstep_get_freqs); | ||
436 | |||
437 | #ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK | ||
438 | module_param(relaxed_check, int, 0444); | ||
439 | MODULE_PARM_DESC(relaxed_check, "Don't do all checks for speedstep capability."); | ||
440 | #endif | ||
441 | |||
442 | MODULE_AUTHOR ("Dominik Brodowski <linux@brodo.de>"); | ||
443 | MODULE_DESCRIPTION ("Library for Intel SpeedStep 1 or 2 cpufreq drivers."); | ||
444 | MODULE_LICENSE ("GPL"); | ||
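speedstep_get_freqs() above converts the measured low-to-high switch time from microseconds to nanoseconds, adds a 20% safety margin, and replaces implausible results (below 50 us or above 10 ms) with a 500 us default. A minimal sketch of just that arithmetic, not part of the patch:

/* Post-process a measured transition time the way speedstep_get_freqs() does:
 * us -> ns with 20% headroom, clamped to a safe 500 us when out of range. */
#include <stdio.h>

static unsigned int cook_latency_ns(unsigned int measured_us)
{
	unsigned int ns = measured_us * 1200;	/* x1000 for ns, plus 20% */

	if (ns > 10000000 || ns < 50000)	/* outside 50 us .. 10 ms */
		ns = 500000;			/* fall back to 500 us */
	return ns;
}

int main(void)
{
	printf("%u us measured -> %u ns reported\n", 120, cook_latency_ns(120));
	return 0;
}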
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h new file mode 100644 index 000000000000..b11bcc608cac --- /dev/null +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h | |||
@@ -0,0 +1,49 @@ | |||
1 | /* | ||
2 | * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> | ||
3 | * | ||
4 | * Licensed under the terms of the GNU GPL License version 2. | ||
5 | * | ||
6 | * Library for common functions for Intel SpeedStep v.1 and v.2 support | ||
7 | * | ||
8 | * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* | ||
9 | */ | ||
10 | |||
11 | |||
12 | |||
13 | /* processors */ | ||
14 | |||
15 | #define SPEEDSTEP_PROCESSOR_PIII_C_EARLY 0x00000001 /* Coppermine core */ | ||
16 | #define SPEEDSTEP_PROCESSOR_PIII_C 0x00000002 /* Coppermine core */ | ||
17 | #define SPEEDSTEP_PROCESSOR_PIII_T 0x00000003 /* Tualatin core */ | ||
18 | #define SPEEDSTEP_PROCESSOR_P4M 0x00000004 /* P4-M */ | ||
19 | |||
20 | /* the following processors are not speedstep-capable and are not auto-detected | ||
21 | * in speedstep_detect_processor(). However, their speed can be detected using | ||
22 | * the speedstep_get_processor_frequency() call. */ | ||
23 | #define SPEEDSTEP_PROCESSOR_PM 0xFFFFFF03 /* Pentium M */ | ||
24 | #define SPEEDSTEP_PROCESSOR_P4D 0xFFFFFF04 /* desktop P4 */ | ||
25 | #define SPEEDSTEP_PROCESSOR_PCORE 0xFFFFFF05 /* Core */ | ||
26 | |||
27 | /* speedstep states -- only two of them */ | ||
28 | |||
29 | #define SPEEDSTEP_HIGH 0x00000000 | ||
30 | #define SPEEDSTEP_LOW 0x00000001 | ||
31 | |||
32 | |||
33 | /* detect a speedstep-capable processor */ | ||
34 | extern unsigned int speedstep_detect_processor (void); | ||
35 | |||
36 | /* detect the current speed (in khz) of the processor */ | ||
37 | extern unsigned int speedstep_get_processor_frequency(unsigned int processor); | ||
38 | |||
39 | |||
40 | /* detect the low and high speeds of the processor. The callback | ||
41 | * set_state's argument is either SPEEDSTEP_HIGH or | ||
42 | * SPEEDSTEP_LOW; the library itself does not initiate any | ||
43 | * cpufreq_notify_transition calls. | ||
44 | */ | ||
45 | extern unsigned int speedstep_get_freqs(unsigned int processor, | ||
46 | unsigned int *low_speed, | ||
47 | unsigned int *high_speed, | ||
48 | unsigned int *transition_latency, | ||
49 | void (*set_state) (unsigned int state)); | ||
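Both drivers in this patch consume this header the same way: detect the processor once at init time, then let speedstep_get_freqs() probe the two operating points and the transition latency through the driver's own set_state() callback. A condensed sketch of that pattern follows; error handling is trimmed and it is meant to be read alongside the drivers above rather than built on its own, with example_set_state() standing in for the chipset-specific transition code:

#include <linux/cpufreq.h>
#include "speedstep-lib.h"

static unsigned int processor;

static struct cpufreq_frequency_table example_freqs[] = {
	{SPEEDSTEP_HIGH, 0},
	{SPEEDSTEP_LOW, 0},
	{0, CPUFREQ_TABLE_END},
};

/* chipset-specific transition, e.g. one of the speedstep_set_state() variants */
static void example_set_state(unsigned int state)
{
}

static int example_cpu_init(struct cpufreq_policy *policy)
{
	processor = speedstep_detect_processor();
	if (!processor)
		return -ENODEV;

	/* fills both frequencies and policy->cpuinfo.transition_latency */
	return speedstep_get_freqs(processor,
			&example_freqs[SPEEDSTEP_LOW].frequency,
			&example_freqs[SPEEDSTEP_HIGH].frequency,
			&policy->cpuinfo.transition_latency,
			&example_set_state);
}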
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c b/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c new file mode 100644 index 000000000000..e1c509aa3054 --- /dev/null +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c | |||
@@ -0,0 +1,424 @@ | |||
1 | /* | ||
2 | * Intel SpeedStep SMI driver. | ||
3 | * | ||
4 | * (C) 2003 Hiroshi Miura <miura@da-cha.org> | ||
5 | * | ||
6 | * Licensed under the terms of the GNU GPL License version 2. | ||
7 | * | ||
8 | */ | ||
9 | |||
10 | |||
11 | /********************************************************************* | ||
12 | * SPEEDSTEP - DEFINITIONS * | ||
13 | *********************************************************************/ | ||
14 | |||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/moduleparam.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/cpufreq.h> | ||
20 | #include <linux/slab.h> | ||
21 | #include <linux/delay.h> | ||
22 | #include <asm/ist.h> | ||
23 | #include <asm/io.h> | ||
24 | |||
25 | #include "speedstep-lib.h" | ||
26 | |||
27 | /* speedstep system management interface port/command. | ||
28 | * | ||
29 | * These parameters are obtained from the IST-SMI BIOS call. | ||
30 | * If the user supplies them, those values are used instead. | ||
31 | * | ||
32 | */ | ||
33 | static int smi_port = 0; | ||
34 | static int smi_cmd = 0; | ||
35 | static unsigned int smi_sig = 0; | ||
36 | |||
37 | /* info about the processor */ | ||
38 | static unsigned int speedstep_processor = 0; | ||
39 | |||
40 | /* | ||
41 | * There are only two frequency states for each processor. Values | ||
42 | * are in kHz for the time being. | ||
43 | */ | ||
44 | static struct cpufreq_frequency_table speedstep_freqs[] = { | ||
45 | {SPEEDSTEP_HIGH, 0}, | ||
46 | {SPEEDSTEP_LOW, 0}, | ||
47 | {0, CPUFREQ_TABLE_END}, | ||
48 | }; | ||
49 | |||
50 | #define GET_SPEEDSTEP_OWNER 0 | ||
51 | #define GET_SPEEDSTEP_STATE 1 | ||
52 | #define SET_SPEEDSTEP_STATE 2 | ||
53 | #define GET_SPEEDSTEP_FREQS 4 | ||
54 | |||
55 | /* how often the SMI call should be retried if it fails, e.g. because | ||
56 | * of DMA activity going on */ | ||
57 | #define SMI_TRIES 5 | ||
58 | |||
59 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-smi", msg) | ||
60 | |||
61 | /** | ||
62 | * speedstep_smi_ownership | ||
63 | */ | ||
64 | static int speedstep_smi_ownership (void) | ||
65 | { | ||
66 | u32 command, result, magic; | ||
67 | u32 function = GET_SPEEDSTEP_OWNER; | ||
68 | unsigned char magic_data[] = "Copyright (c) 1999 Intel Corporation"; | ||
69 | |||
70 | command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); | ||
71 | magic = virt_to_phys(magic_data); | ||
72 | |||
73 | dprintk("trying to obtain ownership with command %x at port %x\n", command, smi_port); | ||
74 | |||
75 | __asm__ __volatile__( | ||
76 | "out %%al, (%%dx)\n" | ||
77 | : "=D" (result) | ||
78 | : "a" (command), "b" (function), "c" (0), "d" (smi_port), | ||
79 | "D" (0), "S" (magic) | ||
80 | : "memory" | ||
81 | ); | ||
82 | |||
83 | dprintk("result is %x\n", result); | ||
84 | |||
85 | return result; | ||
86 | } | ||
87 | |||
88 | /** | ||
89 | * speedstep_smi_get_freqs - get SpeedStep preferred & current freq. | ||
90 | * @low: the low frequency value is placed here | ||
91 | * @high: the high frequency value is placed here | ||
92 | * | ||
93 | * Only available on later SpeedStep-enabled systems; on earlier systems it returns | ||
94 | * bogus results or even hangs [cf. bugme.osdl.org # 1422]. Empirical testing | ||
95 | * shows that the latter occurs if !(ist_info.event & 0xFFFF). | ||
96 | */ | ||
97 | static int speedstep_smi_get_freqs (unsigned int *low, unsigned int *high) | ||
98 | { | ||
99 | u32 command, result = 0, edi, high_mhz, low_mhz; | ||
100 | u32 state=0; | ||
101 | u32 function = GET_SPEEDSTEP_FREQS; | ||
102 | |||
103 | if (!(ist_info.event & 0xFFFF)) { | ||
104 | dprintk("bug #1422 -- can't read freqs from BIOS\n"); | ||
105 | return -ENODEV; | ||
106 | } | ||
107 | |||
108 | command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); | ||
109 | |||
110 | dprintk("trying to determine frequencies with command %x at port %x\n", command, smi_port); | ||
111 | |||
112 | __asm__ __volatile__("movl $0, %%edi\n" | ||
113 | "out %%al, (%%dx)\n" | ||
114 | : "=a" (result), "=b" (high_mhz), "=c" (low_mhz), "=d" (state), "=D" (edi) | ||
115 | : "a" (command), "b" (function), "c" (state), "d" (smi_port), "S" (0) | ||
116 | ); | ||
117 | |||
118 | dprintk("result %x, low_freq %u, high_freq %u\n", result, low_mhz, high_mhz); | ||
119 | |||
120 | /* abort if results are obviously incorrect... */ | ||
121 | if ((high_mhz + low_mhz) < 600) | ||
122 | return -EINVAL; | ||
123 | |||
124 | *high = high_mhz * 1000; | ||
125 | *low = low_mhz * 1000; | ||
126 | |||
127 | return result; | ||
128 | } | ||
129 | |||
130 | /** | ||
131 | * speedstep_get_state - read the current SpeedStep state | ||
132 | * Returns SPEEDSTEP_LOW or SPEEDSTEP_HIGH. | ||
133 | * | ||
134 | */ | ||
135 | static int speedstep_get_state (void) | ||
136 | { | ||
137 | u32 function=GET_SPEEDSTEP_STATE; | ||
138 | u32 result, state, edi, command; | ||
139 | |||
140 | command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); | ||
141 | |||
142 | dprintk("trying to determine current setting with command %x at port %x\n", command, smi_port); | ||
143 | |||
144 | __asm__ __volatile__("movl $0, %%edi\n" | ||
145 | "out %%al, (%%dx)\n" | ||
146 | : "=a" (result), "=b" (state), "=D" (edi) | ||
147 | : "a" (command), "b" (function), "c" (0), "d" (smi_port), "S" (0) | ||
148 | ); | ||
149 | |||
150 | dprintk("state is %x, result is %x\n", state, result); | ||
151 | |||
152 | return (state & 1); | ||
153 | } | ||
154 | |||
155 | |||
156 | /** | ||
157 | * speedstep_set_state - set the SpeedStep state | ||
158 | * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH) | ||
159 | * | ||
160 | */ | ||
161 | static void speedstep_set_state (unsigned int state) | ||
162 | { | ||
163 | unsigned int result = 0, command, new_state; | ||
164 | unsigned long flags; | ||
165 | unsigned int function=SET_SPEEDSTEP_STATE; | ||
166 | unsigned int retry = 0; | ||
167 | |||
168 | if (state > 0x1) | ||
169 | return; | ||
170 | |||
171 | /* Disable IRQs */ | ||
172 | local_irq_save(flags); | ||
173 | |||
174 | command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); | ||
175 | |||
176 | dprintk("trying to set frequency to state %u with command %x at port %x\n", state, command, smi_port); | ||
177 | |||
178 | do { | ||
179 | if (retry) { | ||
180 | dprintk("retry %u, previous result %u, waiting...\n", retry, result); | ||
181 | mdelay(retry * 50); | ||
182 | } | ||
183 | retry++; | ||
184 | __asm__ __volatile__( | ||
185 | "movl $0, %%edi\n" | ||
186 | "out %%al, (%%dx)\n" | ||
187 | : "=b" (new_state), "=D" (result) | ||
188 | : "a" (command), "b" (function), "c" (state), "d" (smi_port), "S" (0) | ||
189 | ); | ||
190 | } while ((new_state != state) && (retry <= SMI_TRIES)); | ||
191 | |||
192 | /* enable IRQs */ | ||
193 | local_irq_restore(flags); | ||
194 | |||
195 | if (new_state == state) { | ||
196 | dprintk("change to %u MHz succeeded after %u tries with result %u\n", (speedstep_freqs[new_state].frequency / 1000), retry, result); | ||
197 | } else { | ||
198 | printk(KERN_ERR "cpufreq: change failed with new_state %u and result %u\n", new_state, result); | ||
199 | } | ||
200 | |||
201 | return; | ||
202 | } | ||
203 | |||
204 | |||
205 | /** | ||
206 | * speedstep_target - set a new CPUFreq policy | ||
207 | * @policy: new policy | ||
208 | * @target_freq: the target frequency | ||
209 | * @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) | ||
210 | * | ||
211 | * Sets a new CPUFreq policy/freq. | ||
212 | */ | ||
213 | static int speedstep_target (struct cpufreq_policy *policy, | ||
214 | unsigned int target_freq, unsigned int relation) | ||
215 | { | ||
216 | unsigned int newstate = 0; | ||
217 | struct cpufreq_freqs freqs; | ||
218 | |||
219 | if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate)) | ||
220 | return -EINVAL; | ||
221 | |||
222 | freqs.old = speedstep_freqs[speedstep_get_state()].frequency; | ||
223 | freqs.new = speedstep_freqs[newstate].frequency; | ||
224 | freqs.cpu = 0; /* speedstep.c is UP only driver */ | ||
225 | |||
226 | if (freqs.old == freqs.new) | ||
227 | return 0; | ||
228 | |||
229 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
230 | speedstep_set_state(newstate); | ||
231 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
232 | |||
233 | return 0; | ||
234 | } | ||
235 | |||
236 | |||
237 | /** | ||
238 | * speedstep_verify - verifies a new CPUFreq policy | ||
239 | * @policy: new policy | ||
240 | * | ||
241 | * Limit must be within speedstep_low_freq and speedstep_high_freq, with | ||
242 | * at least one border included. | ||
243 | */ | ||
244 | static int speedstep_verify (struct cpufreq_policy *policy) | ||
245 | { | ||
246 | return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]); | ||
247 | } | ||
248 | |||
249 | |||
250 | static int speedstep_cpu_init(struct cpufreq_policy *policy) | ||
251 | { | ||
252 | int result; | ||
253 | unsigned int speed,state; | ||
254 | |||
255 | /* capability check */ | ||
256 | if (policy->cpu != 0) | ||
257 | return -ENODEV; | ||
258 | |||
259 | result = speedstep_smi_ownership(); | ||
260 | if (result) { | ||
261 | dprintk("fails in aquiring ownership of a SMI interface.\n"); | ||
262 | return -EINVAL; | ||
263 | } | ||
264 | |||
265 | /* detect low and high frequency */ | ||
266 | result = speedstep_smi_get_freqs(&speedstep_freqs[SPEEDSTEP_LOW].frequency, | ||
267 | &speedstep_freqs[SPEEDSTEP_HIGH].frequency); | ||
268 | if (result) { | ||
269 | /* fall back to the speedstep-lib.c detection mechanism: try both states out */ | ||
270 | dprintk("could not detect low and high frequencies by SMI call.\n"); | ||
271 | result = speedstep_get_freqs(speedstep_processor, | ||
272 | &speedstep_freqs[SPEEDSTEP_LOW].frequency, | ||
273 | &speedstep_freqs[SPEEDSTEP_HIGH].frequency, | ||
274 | NULL, | ||
275 | &speedstep_set_state); | ||
276 | |||
277 | if (result) { | ||
278 | dprintk("could not detect two different speeds -- aborting.\n"); | ||
279 | return result; | ||
280 | } else | ||
281 | dprintk("workaround worked.\n"); | ||
282 | } | ||
283 | |||
284 | /* get current speed setting */ | ||
285 | state = speedstep_get_state(); | ||
286 | speed = speedstep_freqs[state].frequency; | ||
287 | |||
288 | dprintk("currently at %s speed setting - %i MHz\n", | ||
289 | (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) ? "low" : "high", | ||
290 | (speed / 1000)); | ||
291 | |||
292 | /* cpuinfo and default policy values */ | ||
293 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
294 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | ||
295 | policy->cur = speed; | ||
296 | |||
297 | result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs); | ||
298 | if (result) | ||
299 | return (result); | ||
300 | |||
301 | cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu); | ||
302 | |||
303 | return 0; | ||
304 | } | ||
305 | |||
306 | static int speedstep_cpu_exit(struct cpufreq_policy *policy) | ||
307 | { | ||
308 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
309 | return 0; | ||
310 | } | ||
311 | |||
312 | static unsigned int speedstep_get(unsigned int cpu) | ||
313 | { | ||
314 | if (cpu) | ||
315 | return -ENODEV; | ||
316 | return speedstep_get_processor_frequency(speedstep_processor); | ||
317 | } | ||
318 | |||
319 | |||
320 | static int speedstep_resume(struct cpufreq_policy *policy) | ||
321 | { | ||
322 | int result = speedstep_smi_ownership(); | ||
323 | |||
324 | if (result) | ||
325 | dprintk("fails in re-aquiring ownership of a SMI interface.\n"); | ||
326 | |||
327 | return result; | ||
328 | } | ||
329 | |||
330 | static struct freq_attr* speedstep_attr[] = { | ||
331 | &cpufreq_freq_attr_scaling_available_freqs, | ||
332 | NULL, | ||
333 | }; | ||
334 | |||
335 | static struct cpufreq_driver speedstep_driver = { | ||
336 | .name = "speedstep-smi", | ||
337 | .verify = speedstep_verify, | ||
338 | .target = speedstep_target, | ||
339 | .init = speedstep_cpu_init, | ||
340 | .exit = speedstep_cpu_exit, | ||
341 | .get = speedstep_get, | ||
342 | .resume = speedstep_resume, | ||
343 | .owner = THIS_MODULE, | ||
344 | .attr = speedstep_attr, | ||
345 | }; | ||
346 | |||
347 | /** | ||
348 | * speedstep_init - initializes the SpeedStep CPUFreq driver | ||
349 | * | ||
350 | * Initializes the SpeedStep support. Returns -ENODEV on unsupported | ||
351 | * BIOS, -EINVAL on problems during initialization, and zero on | ||
352 | * success. | ||
353 | */ | ||
354 | static int __init speedstep_init(void) | ||
355 | { | ||
356 | speedstep_processor = speedstep_detect_processor(); | ||
357 | |||
358 | switch (speedstep_processor) { | ||
359 | case SPEEDSTEP_PROCESSOR_PIII_T: | ||
360 | case SPEEDSTEP_PROCESSOR_PIII_C: | ||
361 | case SPEEDSTEP_PROCESSOR_PIII_C_EARLY: | ||
362 | break; | ||
363 | default: | ||
364 | speedstep_processor = 0; | ||
365 | } | ||
366 | |||
367 | if (!speedstep_processor) { | ||
368 | dprintk ("No supported Intel CPU detected.\n"); | ||
369 | return -ENODEV; | ||
370 | } | ||
371 | |||
372 | dprintk("signature:0x%.8lx, command:0x%.8lx, event:0x%.8lx, perf_level:0x%.8lx.\n", | ||
373 | ist_info.signature, ist_info.command, ist_info.event, ist_info.perf_level); | ||
374 | |||
375 | /* Error out if there is no IST-SMI BIOS and the user did not | ||
376 | supply both smi_port and smi_cmd. sig = 'ISGE' aka 'Intel Speedstep Gate E' */ | ||
377 | if ((ist_info.signature != 0x47534943) && ( | ||
378 | (smi_port == 0) || (smi_cmd == 0))) | ||
379 | return -ENODEV; | ||
380 | |||
381 | if (smi_sig == 1) | ||
382 | smi_sig = 0x47534943; | ||
383 | else | ||
384 | smi_sig = ist_info.signature; | ||
385 | |||
386 | /* set up smi_port from the module parameter or the BIOS */ | ||
387 | if ((smi_port > 0xff) || (smi_port < 0)) | ||
388 | return -EINVAL; | ||
389 | else if (smi_port == 0) | ||
390 | smi_port = ist_info.command & 0xff; | ||
391 | |||
392 | if ((smi_cmd > 0xff) || (smi_cmd < 0)) | ||
393 | return -EINVAL; | ||
394 | else if (smi_cmd == 0) | ||
395 | smi_cmd = (ist_info.command >> 16) & 0xff; | ||
396 | |||
397 | return cpufreq_register_driver(&speedstep_driver); | ||
398 | } | ||
399 | |||
400 | |||
401 | /** | ||
402 | * speedstep_exit - unregisters SpeedStep support | ||
403 | * | ||
404 | * Unregisters SpeedStep support. | ||
405 | */ | ||
406 | static void __exit speedstep_exit(void) | ||
407 | { | ||
408 | cpufreq_unregister_driver(&speedstep_driver); | ||
409 | } | ||
410 | |||
411 | module_param(smi_port, int, 0444); | ||
412 | module_param(smi_cmd, int, 0444); | ||
413 | module_param(smi_sig, uint, 0444); | ||
414 | |||
415 | MODULE_PARM_DESC(smi_port, "Override the BIOS-given IST port with this value -- Intel's default setting is 0xb2"); | ||
416 | MODULE_PARM_DESC(smi_cmd, "Override the BIOS-given IST command with this value -- Intel's default setting is 0x82"); | ||
417 | MODULE_PARM_DESC(smi_sig, "Set to 1 to fake the IST signature when using the SMI interface."); | ||
418 | |||
419 | MODULE_AUTHOR ("Hiroshi Miura"); | ||
420 | MODULE_DESCRIPTION ("Speedstep driver for IST applet SMI interface."); | ||
421 | MODULE_LICENSE ("GPL"); | ||
422 | |||
423 | module_init(speedstep_init); | ||
424 | module_exit(speedstep_exit); | ||
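When no module parameters are supplied, speedstep_init() above derives the SMI port from the low byte of ist_info.command and the SMI command from bits 16..23. A minimal sketch of that split, not part of the patch, using an invented BIOS value (0xb2 and 0x82 are merely the Intel defaults quoted in the parameter descriptions):

/* Split an IST command word into SMI port (low byte) and SMI command
 * (bits 16..23), mirroring the fallback logic in speedstep_init(). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ist_command = 0x008200b2;	/* invented example value */
	unsigned int smi_port = ist_command & 0xff;
	unsigned int smi_cmd = (ist_command >> 16) & 0xff;

	printf("smi_port=0x%x smi_cmd=0x%x\n", smi_port, smi_cmd);
	return 0;
}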