-rw-r--r--  arch/i386/kernel/cpu/cpufreq/Kconfig           |   9
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/Makefile          |   1
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/e_powersaver.c    | 334
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/longhaul.c        | 359
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/longhaul.h        | 153
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k8.c     |   6
-rw-r--r--  drivers/cpufreq/Kconfig                        |   2
-rw-r--r--  drivers/cpufreq/cpufreq.c                      | 258
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c         |   2
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c             |  64
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c                |   2
-rw-r--r--  drivers/cpufreq/cpufreq_userspace.c            |   2
-rw-r--r--  include/linux/cpufreq.h                        |  10
13 files changed, 795 insertions(+), 407 deletions(-)
diff --git a/arch/i386/kernel/cpu/cpufreq/Kconfig b/arch/i386/kernel/cpu/cpufreq/Kconfig
index 5299c5bf4454..6c52182ca323 100644
--- a/arch/i386/kernel/cpu/cpufreq/Kconfig
+++ b/arch/i386/kernel/cpu/cpufreq/Kconfig
@@ -217,6 +217,15 @@ config X86_LONGHAUL | |||
217 | 217 | ||
218 | If in doubt, say N. | 218 | If in doubt, say N. |
219 | 219 | ||
220 | config X86_E_POWERSAVER | ||
221 | tristate "VIA C7 Enhanced PowerSaver (EXPERIMENTAL)" | ||
222 | select CPU_FREQ_TABLE | ||
223 | depends on EXPERIMENTAL | ||
224 | help | ||
225 | This adds the CPUFreq driver for VIA C7 processors. | ||
226 | |||
227 | If in doubt, say N. | ||
228 | |||
220 | comment "shared options" | 229 | comment "shared options" |
221 | 230 | ||
222 | config X86_ACPI_CPUFREQ_PROC_INTF | 231 | config X86_ACPI_CPUFREQ_PROC_INTF |
diff --git a/arch/i386/kernel/cpu/cpufreq/Makefile b/arch/i386/kernel/cpu/cpufreq/Makefile
index 8de3abe322a9..560f7760dae5 100644
--- a/arch/i386/kernel/cpu/cpufreq/Makefile
+++ b/arch/i386/kernel/cpu/cpufreq/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o | |||
2 | obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o | 2 | obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o |
3 | obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o | 3 | obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o |
4 | obj-$(CONFIG_X86_LONGHAUL) += longhaul.o | 4 | obj-$(CONFIG_X86_LONGHAUL) += longhaul.o |
5 | obj-$(CONFIG_X86_E_POWERSAVER) += e_powersaver.o | ||
5 | obj-$(CONFIG_ELAN_CPUFREQ) += elanfreq.o | 6 | obj-$(CONFIG_ELAN_CPUFREQ) += elanfreq.o |
6 | obj-$(CONFIG_SC520_CPUFREQ) += sc520_freq.o | 7 | obj-$(CONFIG_SC520_CPUFREQ) += sc520_freq.o |
7 | obj-$(CONFIG_X86_LONGRUN) += longrun.o | 8 | obj-$(CONFIG_X86_LONGRUN) += longrun.o |
diff --git a/arch/i386/kernel/cpu/cpufreq/e_powersaver.c b/arch/i386/kernel/cpu/cpufreq/e_powersaver.c
new file mode 100644
index 000000000000..f43d98e11cc7
--- /dev/null
+++ b/arch/i386/kernel/cpu/cpufreq/e_powersaver.c
@@ -0,0 +1,334 @@ | |||
1 | /* | ||
2 | * Based on documentation provided by Dave Jones. Thanks! | ||
3 | * | ||
4 | * Licensed under the terms of the GNU GPL License version 2. | ||
5 | * | ||
6 | * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* | ||
7 | */ | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/cpufreq.h> | ||
13 | #include <linux/ioport.h> | ||
14 | #include <linux/slab.h> | ||
15 | |||
16 | #include <asm/msr.h> | ||
17 | #include <asm/tsc.h> | ||
18 | #include <asm/timex.h> | ||
19 | #include <asm/io.h> | ||
20 | #include <asm/delay.h> | ||
21 | |||
22 | #define EPS_BRAND_C7M 0 | ||
23 | #define EPS_BRAND_C7 1 | ||
24 | #define EPS_BRAND_EDEN 2 | ||
25 | #define EPS_BRAND_C3 3 | ||
26 | |||
27 | struct eps_cpu_data { | ||
28 | u32 fsb; | ||
29 | struct cpufreq_frequency_table freq_table[]; | ||
30 | }; | ||
31 | |||
32 | static struct eps_cpu_data *eps_cpu[NR_CPUS]; | ||
33 | |||
34 | |||
35 | static unsigned int eps_get(unsigned int cpu) | ||
36 | { | ||
37 | struct eps_cpu_data *centaur; | ||
38 | u32 lo, hi; | ||
39 | |||
40 | if (cpu) | ||
41 | return 0; | ||
42 | centaur = eps_cpu[cpu]; | ||
43 | if (centaur == NULL) | ||
44 | return 0; | ||
45 | |||
46 | /* Return current frequency */ | ||
47 | rdmsr(MSR_IA32_PERF_STATUS, lo, hi); | ||
48 | return centaur->fsb * ((lo >> 8) & 0xff); | ||
49 | } | ||
50 | |||
51 | static int eps_set_state(struct eps_cpu_data *centaur, | ||
52 | unsigned int cpu, | ||
53 | u32 dest_state) | ||
54 | { | ||
55 | struct cpufreq_freqs freqs; | ||
56 | u32 lo, hi; | ||
57 | int err = 0; | ||
58 | int i; | ||
59 | |||
60 | freqs.old = eps_get(cpu); | ||
61 | freqs.new = centaur->fsb * ((dest_state >> 8) & 0xff); | ||
62 | freqs.cpu = cpu; | ||
63 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
64 | |||
65 | /* Wait while CPU is busy */ | ||
66 | rdmsr(MSR_IA32_PERF_STATUS, lo, hi); | ||
67 | i = 0; | ||
68 | while (lo & ((1 << 16) | (1 << 17))) { | ||
69 | udelay(16); | ||
70 | rdmsr(MSR_IA32_PERF_STATUS, lo, hi); | ||
71 | i++; | ||
72 | if (unlikely(i > 64)) { | ||
73 | err = -ENODEV; | ||
74 | goto postchange; | ||
75 | } | ||
76 | } | ||
77 | /* Set new multiplier and voltage */ | ||
78 | wrmsr(MSR_IA32_PERF_CTL, dest_state & 0xffff, 0); | ||
79 | /* Wait until transition end */ | ||
80 | i = 0; | ||
81 | do { | ||
82 | udelay(16); | ||
83 | rdmsr(MSR_IA32_PERF_STATUS, lo, hi); | ||
84 | i++; | ||
85 | if (unlikely(i > 64)) { | ||
86 | err = -ENODEV; | ||
87 | goto postchange; | ||
88 | } | ||
89 | } while (lo & ((1 << 16) | (1 << 17))); | ||
90 | |||
91 | /* Return current frequency */ | ||
92 | postchange: | ||
93 | rdmsr(MSR_IA32_PERF_STATUS, lo, hi); | ||
94 | freqs.new = centaur->fsb * ((lo >> 8) & 0xff); | ||
95 | |||
96 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
97 | return err; | ||
98 | } | ||
99 | |||
100 | static int eps_target(struct cpufreq_policy *policy, | ||
101 | unsigned int target_freq, | ||
102 | unsigned int relation) | ||
103 | { | ||
104 | struct eps_cpu_data *centaur; | ||
105 | unsigned int newstate = 0; | ||
106 | unsigned int cpu = policy->cpu; | ||
107 | unsigned int dest_state; | ||
108 | int ret; | ||
109 | |||
110 | if (unlikely(eps_cpu[cpu] == NULL)) | ||
111 | return -ENODEV; | ||
112 | centaur = eps_cpu[cpu]; | ||
113 | |||
114 | if (unlikely(cpufreq_frequency_table_target(policy, | ||
115 | &eps_cpu[cpu]->freq_table[0], | ||
116 | target_freq, | ||
117 | relation, | ||
118 | &newstate))) { | ||
119 | return -EINVAL; | ||
120 | } | ||
121 | |||
122 | /* Make frequency transition */ | ||
123 | dest_state = centaur->freq_table[newstate].index & 0xffff; | ||
124 | ret = eps_set_state(centaur, cpu, dest_state); | ||
125 | if (ret) | ||
126 | printk(KERN_ERR "eps: Timeout!\n"); | ||
127 | return ret; | ||
128 | } | ||
129 | |||
130 | static int eps_verify(struct cpufreq_policy *policy) | ||
131 | { | ||
132 | return cpufreq_frequency_table_verify(policy, | ||
133 | &eps_cpu[policy->cpu]->freq_table[0]); | ||
134 | } | ||
135 | |||
136 | static int eps_cpu_init(struct cpufreq_policy *policy) | ||
137 | { | ||
138 | unsigned int i; | ||
139 | u32 lo, hi; | ||
140 | u64 val; | ||
141 | u8 current_multiplier, current_voltage; | ||
142 | u8 max_multiplier, max_voltage; | ||
143 | u8 min_multiplier, min_voltage; | ||
144 | u8 brand; | ||
145 | u32 fsb; | ||
146 | struct eps_cpu_data *centaur; | ||
147 | struct cpufreq_frequency_table *f_table; | ||
148 | int k, step, voltage; | ||
149 | int ret; | ||
150 | int states; | ||
151 | |||
152 | if (policy->cpu != 0) | ||
153 | return -ENODEV; | ||
154 | |||
155 | /* Check brand */ | ||
156 | printk("eps: Detected VIA "); | ||
157 | rdmsr(0x1153, lo, hi); | ||
158 | brand = (((lo >> 2) ^ lo) >> 18) & 3; | ||
159 | switch(brand) { | ||
160 | case EPS_BRAND_C7M: | ||
161 | printk("C7-M\n"); | ||
162 | break; | ||
163 | case EPS_BRAND_C7: | ||
164 | printk("C7\n"); | ||
165 | break; | ||
166 | case EPS_BRAND_EDEN: | ||
167 | printk("Eden\n"); | ||
168 | break; | ||
169 | case EPS_BRAND_C3: | ||
170 | printk("C3\n"); | ||
171 | return -ENODEV; | ||
172 | break; | ||
173 | } | ||
174 | /* Enable Enhanced PowerSaver */ | ||
175 | rdmsrl(MSR_IA32_MISC_ENABLE, val); | ||
176 | if (!(val & 1 << 16)) { | ||
177 | val |= 1 << 16; | ||
178 | wrmsrl(MSR_IA32_MISC_ENABLE, val); | ||
179 | /* Can be locked at 0 */ | ||
180 | rdmsrl(MSR_IA32_MISC_ENABLE, val); | ||
181 | if (!(val & 1 << 16)) { | ||
182 | printk("eps: Can't enable Enhanced PowerSaver\n"); | ||
183 | return -ENODEV; | ||
184 | } | ||
185 | } | ||
186 | |||
187 | /* Print voltage and multiplier */ | ||
188 | rdmsr(MSR_IA32_PERF_STATUS, lo, hi); | ||
189 | current_voltage = lo & 0xff; | ||
190 | printk("eps: Current voltage = %dmV\n", current_voltage * 16 + 700); | ||
191 | current_multiplier = (lo >> 8) & 0xff; | ||
192 | printk("eps: Current multiplier = %d\n", current_multiplier); | ||
193 | |||
194 | /* Print limits */ | ||
195 | max_voltage = hi & 0xff; | ||
196 | printk("eps: Highest voltage = %dmV\n", max_voltage * 16 + 700); | ||
197 | max_multiplier = (hi >> 8) & 0xff; | ||
198 | printk("eps: Highest multiplier = %d\n", max_multiplier); | ||
199 | min_voltage = (hi >> 16) & 0xff; | ||
200 | printk("eps: Lowest voltage = %dmV\n", min_voltage * 16 + 700); | ||
201 | min_multiplier = (hi >> 24) & 0xff; | ||
202 | printk("eps: Lowest multiplier = %d\n", min_multiplier); | ||
203 | |||
204 | /* Sanity checks */ | ||
205 | if (current_multiplier == 0 || max_multiplier == 0 | ||
206 | || min_multiplier == 0) | ||
207 | return -EINVAL; | ||
208 | if (current_multiplier > max_multiplier | ||
209 | || max_multiplier <= min_multiplier) | ||
210 | return -EINVAL; | ||
211 | if (current_voltage > 0x1c || max_voltage > 0x1c) | ||
212 | return -EINVAL; | ||
213 | if (max_voltage < min_voltage) | ||
214 | return -EINVAL; | ||
215 | |||
216 | /* Calc FSB speed */ | ||
217 | fsb = cpu_khz / current_multiplier; | ||
218 | /* Calc number of p-states supported */ | ||
219 | if (brand == EPS_BRAND_C7M) | ||
220 | states = max_multiplier - min_multiplier + 1; | ||
221 | else | ||
222 | states = 2; | ||
223 | |||
224 | /* Allocate private data and frequency table for current cpu */ | ||
225 | centaur = kzalloc(sizeof(struct eps_cpu_data) | ||
226 | + (states + 1) * sizeof(struct cpufreq_frequency_table), | ||
227 | GFP_KERNEL); | ||
228 | if (!centaur) | ||
229 | return -ENOMEM; | ||
230 | eps_cpu[0] = centaur; | ||
231 | |||
232 | /* Copy basic values */ | ||
233 | centaur->fsb = fsb; | ||
234 | |||
235 | /* Fill frequency and MSR value table */ | ||
236 | f_table = &centaur->freq_table[0]; | ||
237 | if (brand != EPS_BRAND_C7M) { | ||
238 | f_table[0].frequency = fsb * min_multiplier; | ||
239 | f_table[0].index = (min_multiplier << 8) | min_voltage; | ||
240 | f_table[1].frequency = fsb * max_multiplier; | ||
241 | f_table[1].index = (max_multiplier << 8) | max_voltage; | ||
242 | f_table[2].frequency = CPUFREQ_TABLE_END; | ||
243 | } else { | ||
244 | k = 0; | ||
245 | step = ((max_voltage - min_voltage) * 256) | ||
246 | / (max_multiplier - min_multiplier); | ||
247 | for (i = min_multiplier; i <= max_multiplier; i++) { | ||
248 | voltage = (k * step) / 256 + min_voltage; | ||
249 | f_table[k].frequency = fsb * i; | ||
250 | f_table[k].index = (i << 8) | voltage; | ||
251 | k++; | ||
252 | } | ||
253 | f_table[k].frequency = CPUFREQ_TABLE_END; | ||
254 | } | ||
255 | |||
256 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
257 | policy->cpuinfo.transition_latency = 140000; /* 844mV -> 700mV in ns */ | ||
258 | policy->cur = fsb * current_multiplier; | ||
259 | |||
260 | ret = cpufreq_frequency_table_cpuinfo(policy, &centaur->freq_table[0]); | ||
261 | if (ret) { | ||
262 | kfree(centaur); | ||
263 | return ret; | ||
264 | } | ||
265 | |||
266 | cpufreq_frequency_table_get_attr(&centaur->freq_table[0], policy->cpu); | ||
267 | return 0; | ||
268 | } | ||
269 | |||
270 | static int eps_cpu_exit(struct cpufreq_policy *policy) | ||
271 | { | ||
272 | unsigned int cpu = policy->cpu; | ||
273 | struct eps_cpu_data *centaur; | ||
274 | u32 lo, hi; | ||
275 | |||
276 | if (eps_cpu[cpu] == NULL) | ||
277 | return -ENODEV; | ||
278 | centaur = eps_cpu[cpu]; | ||
279 | |||
280 | /* Get max frequency */ | ||
281 | rdmsr(MSR_IA32_PERF_STATUS, lo, hi); | ||
282 | /* Set max frequency */ | ||
283 | eps_set_state(centaur, cpu, hi & 0xffff); | ||
284 | /* Bye */ | ||
285 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
286 | kfree(eps_cpu[cpu]); | ||
287 | eps_cpu[cpu] = NULL; | ||
288 | return 0; | ||
289 | } | ||
290 | |||
291 | static struct freq_attr* eps_attr[] = { | ||
292 | &cpufreq_freq_attr_scaling_available_freqs, | ||
293 | NULL, | ||
294 | }; | ||
295 | |||
296 | static struct cpufreq_driver eps_driver = { | ||
297 | .verify = eps_verify, | ||
298 | .target = eps_target, | ||
299 | .init = eps_cpu_init, | ||
300 | .exit = eps_cpu_exit, | ||
301 | .get = eps_get, | ||
302 | .name = "e_powersaver", | ||
303 | .owner = THIS_MODULE, | ||
304 | .attr = eps_attr, | ||
305 | }; | ||
306 | |||
307 | static int __init eps_init(void) | ||
308 | { | ||
309 | struct cpuinfo_x86 *c = cpu_data; | ||
310 | |||
311 | /* This driver will work only on Centaur C7 processors with | ||
312 | * Enhanced SpeedStep/PowerSaver registers */ | ||
313 | if (c->x86_vendor != X86_VENDOR_CENTAUR | ||
314 | || c->x86 != 6 || c->x86_model != 10) | ||
315 | return -ENODEV; | ||
316 | if (!cpu_has(c, X86_FEATURE_EST)) | ||
317 | return -ENODEV; | ||
318 | |||
319 | if (cpufreq_register_driver(&eps_driver)) | ||
320 | return -EINVAL; | ||
321 | return 0; | ||
322 | } | ||
323 | |||
324 | static void __exit eps_exit(void) | ||
325 | { | ||
326 | cpufreq_unregister_driver(&eps_driver); | ||
327 | } | ||
328 | |||
329 | MODULE_AUTHOR("Rafał Bilski <rafalbilski@interia.pl>"); | ||
330 | MODULE_DESCRIPTION("Enhanced PowerSaver driver for VIA C7 CPU's."); | ||
331 | MODULE_LICENSE("GPL"); | ||
332 | |||
333 | module_init(eps_init); | ||
334 | module_exit(eps_exit); | ||
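The new driver derives everything from the low word of MSR_IA32_PERF_STATUS: bits 8-15 hold the FSB multiplier and bits 0-7 the voltage ID, with the voltage reported as 700 mV plus 16 mV per VID step (see eps_get() and eps_cpu_init() above). The stand-alone C sketch below is not part of the patch; it only decodes an invented status value the same way, to make the bit layout concrete.

/* Illustration only -- not part of the patch.  Decodes a made-up
 * MSR_IA32_PERF_STATUS low word the way eps_get()/eps_cpu_init() do. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t lo = 0x0000100a;       /* hypothetical status value */
        uint32_t fsb_khz = 100000;      /* assume a 100 MHz FSB */
        unsigned int vid  = lo & 0xff;          /* bits 0-7: voltage ID */
        unsigned int mult = (lo >> 8) & 0xff;   /* bits 8-15: multiplier */

        printf("multiplier : %u\n", mult);
        printf("frequency  : %u kHz\n", fsb_khz * mult);
        printf("voltage    : %u mV\n", vid * 16 + 700);
        return 0;
}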
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.c b/arch/i386/kernel/cpu/cpufreq/longhaul.c
index a3db9332d652..b59878a0d9b3 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.c
@@ -8,12 +8,11 @@ | |||
8 | * VIA have currently 3 different versions of Longhaul. | 8 | * VIA have currently 3 different versions of Longhaul. |
9 | * Version 1 (Longhaul) uses the BCR2 MSR at 0x1147. | 9 | * Version 1 (Longhaul) uses the BCR2 MSR at 0x1147. |
10 | * It is present only in Samuel 1 (C5A), Samuel 2 (C5B) stepping 0. | 10 | * It is present only in Samuel 1 (C5A), Samuel 2 (C5B) stepping 0. |
11 | * Version 2 of longhaul is the same as v1, but adds voltage scaling. | 11 | * Version 2 of longhaul is backward compatible with v1, but adds |
12 | * Present in Samuel 2 (steppings 1-7 only) (C5B), and Ezra (C5C) | 12 | * LONGHAUL MSR for purpose of both frequency and voltage scaling. |
13 | * voltage scaling support has currently been disabled in this driver | 13 | * Present in Samuel 2 (steppings 1-7 only) (C5B), and Ezra (C5C). |
14 | * until we have code that gets it right. | ||
15 | * Version 3 of longhaul got renamed to Powersaver and redesigned | 14 | * Version 3 of longhaul got renamed to Powersaver and redesigned |
16 | * to use the POWERSAVER MSR at 0x110a. | 15 | * to use only the POWERSAVER MSR at 0x110a. |
17 | * It is present in Ezra-T (C5M), Nehemiah (C5X) and above. | 16 | * It is present in Ezra-T (C5M), Nehemiah (C5X) and above. |
18 | * It's pretty much the same feature wise to longhaul v2, though | 17 | * It's pretty much the same feature wise to longhaul v2, though |
19 | * there is provision for scaling FSB too, but this doesn't work | 18 | * there is provision for scaling FSB too, but this doesn't work |
@@ -51,10 +50,12 @@ | |||
51 | #define CPU_EZRA 3 | 50 | #define CPU_EZRA 3 |
52 | #define CPU_EZRA_T 4 | 51 | #define CPU_EZRA_T 4 |
53 | #define CPU_NEHEMIAH 5 | 52 | #define CPU_NEHEMIAH 5 |
53 | #define CPU_NEHEMIAH_C 6 | ||
54 | 54 | ||
55 | /* Flags */ | 55 | /* Flags */ |
56 | #define USE_ACPI_C3 (1 << 1) | 56 | #define USE_ACPI_C3 (1 << 1) |
57 | #define USE_NORTHBRIDGE (1 << 2) | 57 | #define USE_NORTHBRIDGE (1 << 2) |
58 | #define USE_VT8235 (1 << 3) | ||
58 | 59 | ||
59 | static int cpu_model; | 60 | static int cpu_model; |
60 | static unsigned int numscales=16; | 61 | static unsigned int numscales=16; |
@@ -63,7 +64,8 @@ static unsigned int fsb; | |||
63 | static struct mV_pos *vrm_mV_table; | 64 | static struct mV_pos *vrm_mV_table; |
64 | static unsigned char *mV_vrm_table; | 65 | static unsigned char *mV_vrm_table; |
65 | struct f_msr { | 66 | struct f_msr { |
66 | unsigned char vrm; | 67 | u8 vrm; |
68 | u8 pos; | ||
67 | }; | 69 | }; |
68 | static struct f_msr f_msr_table[32]; | 70 | static struct f_msr f_msr_table[32]; |
69 | 71 | ||
@@ -73,10 +75,10 @@ static int can_scale_voltage; | |||
73 | static struct acpi_processor *pr = NULL; | 75 | static struct acpi_processor *pr = NULL; |
74 | static struct acpi_processor_cx *cx = NULL; | 76 | static struct acpi_processor_cx *cx = NULL; |
75 | static u8 longhaul_flags; | 77 | static u8 longhaul_flags; |
78 | static u8 longhaul_pos; | ||
76 | 79 | ||
77 | /* Module parameters */ | 80 | /* Module parameters */ |
78 | static int scale_voltage; | 81 | static int scale_voltage; |
79 | static int ignore_latency; | ||
80 | 82 | ||
81 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "longhaul", msg) | 83 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "longhaul", msg) |
82 | 84 | ||
@@ -164,26 +166,47 @@ static void do_longhaul1(unsigned int clock_ratio_index) | |||
164 | static void do_powersaver(int cx_address, unsigned int clock_ratio_index) | 166 | static void do_powersaver(int cx_address, unsigned int clock_ratio_index) |
165 | { | 167 | { |
166 | union msr_longhaul longhaul; | 168 | union msr_longhaul longhaul; |
169 | u8 dest_pos; | ||
167 | u32 t; | 170 | u32 t; |
168 | 171 | ||
172 | dest_pos = f_msr_table[clock_ratio_index].pos; | ||
173 | |||
169 | rdmsrl(MSR_VIA_LONGHAUL, longhaul.val); | 174 | rdmsrl(MSR_VIA_LONGHAUL, longhaul.val); |
175 | /* Setup new frequency */ | ||
170 | longhaul.bits.RevisionKey = longhaul.bits.RevisionID; | 176 | longhaul.bits.RevisionKey = longhaul.bits.RevisionID; |
171 | longhaul.bits.SoftBusRatio = clock_ratio_index & 0xf; | 177 | longhaul.bits.SoftBusRatio = clock_ratio_index & 0xf; |
172 | longhaul.bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4; | 178 | longhaul.bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4; |
173 | longhaul.bits.EnableSoftBusRatio = 1; | 179 | /* Setup new voltage */ |
174 | 180 | if (can_scale_voltage) | |
175 | if (can_scale_voltage) { | ||
176 | longhaul.bits.SoftVID = f_msr_table[clock_ratio_index].vrm; | 181 | longhaul.bits.SoftVID = f_msr_table[clock_ratio_index].vrm; |
182 | /* Sync to timer tick */ | ||
183 | safe_halt(); | ||
184 | /* Raise voltage if necessary */ | ||
185 | if (can_scale_voltage && longhaul_pos < dest_pos) { | ||
177 | longhaul.bits.EnableSoftVID = 1; | 186 | longhaul.bits.EnableSoftVID = 1; |
187 | wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); | ||
188 | /* Change voltage */ | ||
189 | if (!cx_address) { | ||
190 | ACPI_FLUSH_CPU_CACHE(); | ||
191 | halt(); | ||
192 | } else { | ||
193 | ACPI_FLUSH_CPU_CACHE(); | ||
194 | /* Invoke C3 */ | ||
195 | inb(cx_address); | ||
196 | /* Dummy op - must do something useless after P_LVL3 | ||
197 | * read */ | ||
198 | t = inl(acpi_gbl_FADT.xpm_timer_block.address); | ||
199 | } | ||
200 | longhaul.bits.EnableSoftVID = 0; | ||
201 | wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); | ||
202 | longhaul_pos = dest_pos; | ||
178 | } | 203 | } |
179 | 204 | ||
180 | /* Sync to timer tick */ | ||
181 | safe_halt(); | ||
182 | /* Change frequency on next halt or sleep */ | 205 | /* Change frequency on next halt or sleep */ |
206 | longhaul.bits.EnableSoftBusRatio = 1; | ||
183 | wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); | 207 | wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); |
184 | if (!cx_address) { | 208 | if (!cx_address) { |
185 | ACPI_FLUSH_CPU_CACHE(); | 209 | ACPI_FLUSH_CPU_CACHE(); |
186 | /* Invoke C1 */ | ||
187 | halt(); | 210 | halt(); |
188 | } else { | 211 | } else { |
189 | ACPI_FLUSH_CPU_CACHE(); | 212 | ACPI_FLUSH_CPU_CACHE(); |
@@ -193,12 +216,29 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index) | |||
193 | t = inl(acpi_gbl_FADT.xpm_timer_block.address); | 216 | t = inl(acpi_gbl_FADT.xpm_timer_block.address); |
194 | } | 217 | } |
195 | /* Disable bus ratio bit */ | 218 | /* Disable bus ratio bit */ |
196 | local_irq_disable(); | ||
197 | longhaul.bits.RevisionKey = longhaul.bits.RevisionID; | ||
198 | longhaul.bits.EnableSoftBusRatio = 0; | 219 | longhaul.bits.EnableSoftBusRatio = 0; |
199 | longhaul.bits.EnableSoftBSEL = 0; | ||
200 | longhaul.bits.EnableSoftVID = 0; | ||
201 | wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); | 220 | wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); |
221 | |||
222 | /* Reduce voltage if necessary */ | ||
223 | if (can_scale_voltage && longhaul_pos > dest_pos) { | ||
224 | longhaul.bits.EnableSoftVID = 1; | ||
225 | wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); | ||
226 | /* Change voltage */ | ||
227 | if (!cx_address) { | ||
228 | ACPI_FLUSH_CPU_CACHE(); | ||
229 | halt(); | ||
230 | } else { | ||
231 | ACPI_FLUSH_CPU_CACHE(); | ||
232 | /* Invoke C3 */ | ||
233 | inb(cx_address); | ||
234 | /* Dummy op - must do something useless after P_LVL3 | ||
235 | * read */ | ||
236 | t = inl(acpi_gbl_FADT.xpm_timer_block.address); | ||
237 | } | ||
238 | longhaul.bits.EnableSoftVID = 0; | ||
239 | wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); | ||
240 | longhaul_pos = dest_pos; | ||
241 | } | ||
202 | } | 242 | } |
203 | 243 | ||
204 | /** | 244 | /** |
@@ -257,26 +297,19 @@ static void longhaul_setstate(unsigned int clock_ratio_index) | |||
257 | /* | 297 | /* |
258 | * Longhaul v1. (Samuel[C5A] and Samuel2 stepping 0[C5B]) | 298 | * Longhaul v1. (Samuel[C5A] and Samuel2 stepping 0[C5B]) |
259 | * Software controlled multipliers only. | 299 | * Software controlled multipliers only. |
260 | * | ||
261 | * *NB* Until we get voltage scaling working v1 & v2 are the same code. | ||
262 | * Longhaul v2 appears in Samuel2 Steppings 1->7 [C5b] and Ezra [C5C] | ||
263 | */ | 300 | */ |
264 | case TYPE_LONGHAUL_V1: | 301 | case TYPE_LONGHAUL_V1: |
265 | case TYPE_LONGHAUL_V2: | ||
266 | do_longhaul1(clock_ratio_index); | 302 | do_longhaul1(clock_ratio_index); |
267 | break; | 303 | break; |
268 | 304 | ||
269 | /* | 305 | /* |
306 | * Longhaul v2 appears in Samuel2 Steppings 1->7 [C5B] and Ezra [C5C] | ||
307 | * | ||
270 | * Longhaul v3 (aka Powersaver). (Ezra-T [C5M] & Nehemiah [C5N]) | 308 | * Longhaul v3 (aka Powersaver). (Ezra-T [C5M] & Nehemiah [C5N]) |
271 | * We can scale voltage with this too, but that's currently | ||
272 | * disabled until we come up with a decent 'match freq to voltage' | ||
273 | * algorithm. | ||
274 | * When we add voltage scaling, we will also need to do the | ||
275 | * voltage/freq setting in order depending on the direction | ||
276 | * of scaling (like we do in powernow-k7.c) | ||
277 | * Nehemiah can do FSB scaling too, but this has never been proven | 309 | * Nehemiah can do FSB scaling too, but this has never been proven |
278 | * to work in practice. | 310 | * to work in practice. |
279 | */ | 311 | */ |
312 | case TYPE_LONGHAUL_V2: | ||
280 | case TYPE_POWERSAVER: | 313 | case TYPE_POWERSAVER: |
281 | if (longhaul_flags & USE_ACPI_C3) { | 314 | if (longhaul_flags & USE_ACPI_C3) { |
282 | /* Don't allow wakeup */ | 315 | /* Don't allow wakeup */ |
@@ -301,6 +334,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index) | |||
301 | local_irq_restore(flags); | 334 | local_irq_restore(flags); |
302 | preempt_enable(); | 335 | preempt_enable(); |
303 | 336 | ||
337 | freqs.new = calc_speed(longhaul_get_cpu_mult()); | ||
304 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 338 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); |
305 | } | 339 | } |
306 | 340 | ||
@@ -315,31 +349,19 @@ static void longhaul_setstate(unsigned int clock_ratio_index) | |||
315 | 349 | ||
316 | #define ROUNDING 0xf | 350 | #define ROUNDING 0xf |
317 | 351 | ||
318 | static int _guess(int guess, int mult) | ||
319 | { | ||
320 | int target; | ||
321 | |||
322 | target = ((mult/10)*guess); | ||
323 | if (mult%10 != 0) | ||
324 | target += (guess/2); | ||
325 | target += ROUNDING/2; | ||
326 | target &= ~ROUNDING; | ||
327 | return target; | ||
328 | } | ||
329 | |||
330 | |||
331 | static int guess_fsb(int mult) | 352 | static int guess_fsb(int mult) |
332 | { | 353 | { |
333 | int speed = (cpu_khz/1000); | 354 | int speed = cpu_khz / 1000; |
334 | int i; | 355 | int i; |
335 | int speeds[] = { 66, 100, 133, 200 }; | 356 | int speeds[] = { 666, 1000, 1333, 2000 }; |
336 | 357 | int f_max, f_min; | |
337 | speed += ROUNDING/2; | 358 | |
338 | speed &= ~ROUNDING; | 359 | for (i = 0; i < 4; i++) { |
339 | 360 | f_max = ((speeds[i] * mult) + 50) / 100; | |
340 | for (i=0; i<4; i++) { | 361 | f_max += (ROUNDING / 2); |
341 | if (_guess(speeds[i], mult) == speed) | 362 | f_min = f_max - ROUNDING; |
342 | return speeds[i]; | 363 | if ((speed <= f_max) && (speed >= f_min)) |
364 | return speeds[i] / 10; | ||
343 | } | 365 | } |
344 | return 0; | 366 | return 0; |
345 | } | 367 | } |
@@ -347,67 +369,40 @@ static int guess_fsb(int mult) | |||
347 | 369 | ||
348 | static int __init longhaul_get_ranges(void) | 370 | static int __init longhaul_get_ranges(void) |
349 | { | 371 | { |
350 | unsigned long invalue; | ||
351 | unsigned int ezra_t_multipliers[32]= { | ||
352 | 90, 30, 40, 100, 55, 35, 45, 95, | ||
353 | 50, 70, 80, 60, 120, 75, 85, 65, | ||
354 | -1, 110, 120, -1, 135, 115, 125, 105, | ||
355 | 130, 150, 160, 140, -1, 155, -1, 145 }; | ||
356 | unsigned int j, k = 0; | 372 | unsigned int j, k = 0; |
357 | union msr_longhaul longhaul; | 373 | int mult; |
358 | int mult = 0; | ||
359 | 374 | ||
360 | switch (longhaul_version) { | 375 | /* Get current frequency */ |
361 | case TYPE_LONGHAUL_V1: | 376 | mult = longhaul_get_cpu_mult(); |
362 | case TYPE_LONGHAUL_V2: | 377 | if (mult == -1) { |
363 | /* Ugh, Longhaul v1 didn't have the min/max MSRs. | 378 | printk(KERN_INFO PFX "Invalid (reserved) multiplier!\n"); |
364 | Assume min=3.0x & max = whatever we booted at. */ | 379 | return -EINVAL; |
380 | } | ||
381 | fsb = guess_fsb(mult); | ||
382 | if (fsb == 0) { | ||
383 | printk(KERN_INFO PFX "Invalid (reserved) FSB!\n"); | ||
384 | return -EINVAL; | ||
385 | } | ||
386 | /* Get max multiplier - as we always did. | ||
387 | * Longhaul MSR is usefull only when voltage scaling is enabled. | ||
388 | * C3 is booting at max anyway. */ | ||
389 | maxmult = mult; | ||
390 | /* Get min multiplier */ | ||
391 | switch (cpu_model) { | ||
392 | case CPU_NEHEMIAH: | ||
393 | minmult = 50; | ||
394 | break; | ||
395 | case CPU_NEHEMIAH_C: | ||
396 | minmult = 40; | ||
397 | break; | ||
398 | default: | ||
365 | minmult = 30; | 399 | minmult = 30; |
366 | maxmult = mult = longhaul_get_cpu_mult(); | ||
367 | break; | 400 | break; |
368 | |||
369 | case TYPE_POWERSAVER: | ||
370 | /* Ezra-T */ | ||
371 | if (cpu_model==CPU_EZRA_T) { | ||
372 | minmult = 30; | ||
373 | rdmsrl (MSR_VIA_LONGHAUL, longhaul.val); | ||
374 | invalue = longhaul.bits.MaxMHzBR; | ||
375 | if (longhaul.bits.MaxMHzBR4) | ||
376 | invalue += 16; | ||
377 | maxmult = mult = ezra_t_multipliers[invalue]; | ||
378 | break; | ||
379 | } | ||
380 | |||
381 | /* Nehemiah */ | ||
382 | if (cpu_model==CPU_NEHEMIAH) { | ||
383 | rdmsrl (MSR_VIA_LONGHAUL, longhaul.val); | ||
384 | |||
385 | /* | ||
386 | * TODO: This code works, but raises a lot of questions. | ||
387 | * - Some Nehemiah's seem to have broken Min/MaxMHzBR's. | ||
388 | * We get around this by using a hardcoded multiplier of 4.0x | ||
389 | * for the minimimum speed, and the speed we booted up at for the max. | ||
390 | * This is done in longhaul_get_cpu_mult() by reading the EBLCR register. | ||
391 | * - According to some VIA documentation EBLCR is only | ||
392 | * in pre-Nehemiah C3s. How this still works is a mystery. | ||
393 | * We're possibly using something undocumented and unsupported, | ||
394 | * But it works, so we don't grumble. | ||
395 | */ | ||
396 | minmult=40; | ||
397 | maxmult = mult = longhaul_get_cpu_mult(); | ||
398 | break; | ||
399 | } | ||
400 | } | 401 | } |
401 | fsb = guess_fsb(mult); | ||
402 | 402 | ||
403 | dprintk ("MinMult:%d.%dx MaxMult:%d.%dx\n", | 403 | dprintk ("MinMult:%d.%dx MaxMult:%d.%dx\n", |
404 | minmult/10, minmult%10, maxmult/10, maxmult%10); | 404 | minmult/10, minmult%10, maxmult/10, maxmult%10); |
405 | 405 | ||
406 | if (fsb == 0) { | ||
407 | printk (KERN_INFO PFX "Invalid (reserved) FSB!\n"); | ||
408 | return -EINVAL; | ||
409 | } | ||
410 | |||
411 | highest_speed = calc_speed(maxmult); | 406 | highest_speed = calc_speed(maxmult); |
412 | lowest_speed = calc_speed(minmult); | 407 | lowest_speed = calc_speed(minmult); |
413 | dprintk ("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb, | 408 | dprintk ("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb, |
@@ -455,6 +450,7 @@ static void __init longhaul_setup_voltagescaling(void) | |||
455 | union msr_longhaul longhaul; | 450 | union msr_longhaul longhaul; |
456 | struct mV_pos minvid, maxvid; | 451 | struct mV_pos minvid, maxvid; |
457 | unsigned int j, speed, pos, kHz_step, numvscales; | 452 | unsigned int j, speed, pos, kHz_step, numvscales; |
453 | int min_vid_speed; | ||
458 | 454 | ||
459 | rdmsrl(MSR_VIA_LONGHAUL, longhaul.val); | 455 | rdmsrl(MSR_VIA_LONGHAUL, longhaul.val); |
460 | if (!(longhaul.bits.RevisionID & 1)) { | 456 | if (!(longhaul.bits.RevisionID & 1)) { |
@@ -468,14 +464,14 @@ static void __init longhaul_setup_voltagescaling(void) | |||
468 | mV_vrm_table = &mV_vrm85[0]; | 464 | mV_vrm_table = &mV_vrm85[0]; |
469 | } else { | 465 | } else { |
470 | printk (KERN_INFO PFX "Mobile VRM\n"); | 466 | printk (KERN_INFO PFX "Mobile VRM\n"); |
467 | if (cpu_model < CPU_NEHEMIAH) | ||
468 | return; | ||
471 | vrm_mV_table = &mobilevrm_mV[0]; | 469 | vrm_mV_table = &mobilevrm_mV[0]; |
472 | mV_vrm_table = &mV_mobilevrm[0]; | 470 | mV_vrm_table = &mV_mobilevrm[0]; |
473 | } | 471 | } |
474 | 472 | ||
475 | minvid = vrm_mV_table[longhaul.bits.MinimumVID]; | 473 | minvid = vrm_mV_table[longhaul.bits.MinimumVID]; |
476 | maxvid = vrm_mV_table[longhaul.bits.MaximumVID]; | 474 | maxvid = vrm_mV_table[longhaul.bits.MaximumVID]; |
477 | numvscales = maxvid.pos - minvid.pos + 1; | ||
478 | kHz_step = (highest_speed - lowest_speed) / numvscales; | ||
479 | 475 | ||
480 | if (minvid.mV == 0 || maxvid.mV == 0 || minvid.mV > maxvid.mV) { | 476 | if (minvid.mV == 0 || maxvid.mV == 0 || minvid.mV > maxvid.mV) { |
481 | printk (KERN_INFO PFX "Bogus values Min:%d.%03d Max:%d.%03d. " | 477 | printk (KERN_INFO PFX "Bogus values Min:%d.%03d Max:%d.%03d. " |
@@ -491,20 +487,59 @@ static void __init longhaul_setup_voltagescaling(void) | |||
491 | return; | 487 | return; |
492 | } | 488 | } |
493 | 489 | ||
494 | printk(KERN_INFO PFX "Max VID=%d.%03d Min VID=%d.%03d, %d possible voltage scales\n", | 490 | /* How many voltage steps */ |
491 | numvscales = maxvid.pos - minvid.pos + 1; | ||
492 | printk(KERN_INFO PFX | ||
493 | "Max VID=%d.%03d " | ||
494 | "Min VID=%d.%03d, " | ||
495 | "%d possible voltage scales\n", | ||
495 | maxvid.mV/1000, maxvid.mV%1000, | 496 | maxvid.mV/1000, maxvid.mV%1000, |
496 | minvid.mV/1000, minvid.mV%1000, | 497 | minvid.mV/1000, minvid.mV%1000, |
497 | numvscales); | 498 | numvscales); |
498 | 499 | ||
500 | /* Calculate max frequency at min voltage */ | ||
501 | j = longhaul.bits.MinMHzBR; | ||
502 | if (longhaul.bits.MinMHzBR4) | ||
503 | j += 16; | ||
504 | min_vid_speed = eblcr_table[j]; | ||
505 | if (min_vid_speed == -1) | ||
506 | return; | ||
507 | switch (longhaul.bits.MinMHzFSB) { | ||
508 | case 0: | ||
509 | min_vid_speed *= 13333; | ||
510 | break; | ||
511 | case 1: | ||
512 | min_vid_speed *= 10000; | ||
513 | break; | ||
514 | case 3: | ||
515 | min_vid_speed *= 6666; | ||
516 | break; | ||
517 | default: | ||
518 | return; | ||
519 | break; | ||
520 | } | ||
521 | if (min_vid_speed >= highest_speed) | ||
522 | return; | ||
523 | /* Calculate kHz for one voltage step */ | ||
524 | kHz_step = (highest_speed - min_vid_speed) / numvscales; | ||
525 | |||
526 | |||
499 | j = 0; | 527 | j = 0; |
500 | while (longhaul_table[j].frequency != CPUFREQ_TABLE_END) { | 528 | while (longhaul_table[j].frequency != CPUFREQ_TABLE_END) { |
501 | speed = longhaul_table[j].frequency; | 529 | speed = longhaul_table[j].frequency; |
502 | pos = (speed - lowest_speed) / kHz_step + minvid.pos; | 530 | if (speed > min_vid_speed) |
531 | pos = (speed - min_vid_speed) / kHz_step + minvid.pos; | ||
532 | else | ||
533 | pos = minvid.pos; | ||
503 | f_msr_table[longhaul_table[j].index].vrm = mV_vrm_table[pos]; | 534 | f_msr_table[longhaul_table[j].index].vrm = mV_vrm_table[pos]; |
535 | f_msr_table[longhaul_table[j].index].pos = pos; | ||
504 | j++; | 536 | j++; |
505 | } | 537 | } |
506 | 538 | ||
539 | longhaul_pos = maxvid.pos; | ||
507 | can_scale_voltage = 1; | 540 | can_scale_voltage = 1; |
541 | printk(KERN_INFO PFX "Voltage scaling enabled. " | ||
542 | "Use of \"conservative\" governor is highly recommended.\n"); | ||
508 | } | 543 | } |
509 | 544 | ||
510 | 545 | ||
@@ -573,20 +608,51 @@ static int enable_arbiter_disable(void) | |||
573 | if (dev != NULL) { | 608 | if (dev != NULL) { |
574 | /* Enable access to port 0x22 */ | 609 | /* Enable access to port 0x22 */ |
575 | pci_read_config_byte(dev, reg, &pci_cmd); | 610 | pci_read_config_byte(dev, reg, &pci_cmd); |
576 | if ( !(pci_cmd & 1<<7) ) { | 611 | if (!(pci_cmd & 1<<7)) { |
577 | pci_cmd |= 1<<7; | 612 | pci_cmd |= 1<<7; |
578 | pci_write_config_byte(dev, reg, pci_cmd); | 613 | pci_write_config_byte(dev, reg, pci_cmd); |
614 | pci_read_config_byte(dev, reg, &pci_cmd); | ||
615 | if (!(pci_cmd & 1<<7)) { | ||
616 | printk(KERN_ERR PFX | ||
617 | "Can't enable access to port 0x22.\n"); | ||
618 | return 0; | ||
619 | } | ||
579 | } | 620 | } |
580 | return 1; | 621 | return 1; |
581 | } | 622 | } |
582 | return 0; | 623 | return 0; |
583 | } | 624 | } |
584 | 625 | ||
626 | static int longhaul_setup_vt8235(void) | ||
627 | { | ||
628 | struct pci_dev *dev; | ||
629 | u8 pci_cmd; | ||
630 | |||
631 | /* Find VT8235 southbridge */ | ||
632 | dev = pci_find_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, NULL); | ||
633 | if (dev != NULL) { | ||
634 | /* Set transition time to max */ | ||
635 | pci_read_config_byte(dev, 0xec, &pci_cmd); | ||
636 | pci_cmd &= ~(1 << 2); | ||
637 | pci_write_config_byte(dev, 0xec, pci_cmd); | ||
638 | pci_read_config_byte(dev, 0xe4, &pci_cmd); | ||
639 | pci_cmd &= ~(1 << 7); | ||
640 | pci_write_config_byte(dev, 0xe4, pci_cmd); | ||
641 | pci_read_config_byte(dev, 0xe5, &pci_cmd); | ||
642 | pci_cmd |= 1 << 7; | ||
643 | pci_write_config_byte(dev, 0xe5, pci_cmd); | ||
644 | return 1; | ||
645 | } | ||
646 | return 0; | ||
647 | } | ||
648 | |||
585 | static int __init longhaul_cpu_init(struct cpufreq_policy *policy) | 649 | static int __init longhaul_cpu_init(struct cpufreq_policy *policy) |
586 | { | 650 | { |
587 | struct cpuinfo_x86 *c = cpu_data; | 651 | struct cpuinfo_x86 *c = cpu_data; |
588 | char *cpuname=NULL; | 652 | char *cpuname=NULL; |
589 | int ret; | 653 | int ret; |
654 | u32 lo, hi; | ||
655 | int vt8235_present; | ||
590 | 656 | ||
591 | /* Check what we have on this motherboard */ | 657 | /* Check what we have on this motherboard */ |
592 | switch (c->x86_model) { | 658 | switch (c->x86_model) { |
@@ -599,16 +665,20 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy) | |||
599 | break; | 665 | break; |
600 | 666 | ||
601 | case 7: | 667 | case 7: |
602 | longhaul_version = TYPE_LONGHAUL_V1; | ||
603 | switch (c->x86_mask) { | 668 | switch (c->x86_mask) { |
604 | case 0: | 669 | case 0: |
670 | longhaul_version = TYPE_LONGHAUL_V1; | ||
605 | cpu_model = CPU_SAMUEL2; | 671 | cpu_model = CPU_SAMUEL2; |
606 | cpuname = "C3 'Samuel 2' [C5B]"; | 672 | cpuname = "C3 'Samuel 2' [C5B]"; |
607 | /* Note, this is not a typo, early Samuel2's had Samuel1 ratios. */ | 673 | /* Note, this is not a typo, early Samuel2's had |
608 | memcpy (clock_ratio, samuel1_clock_ratio, sizeof(samuel1_clock_ratio)); | 674 | * Samuel1 ratios. */ |
609 | memcpy (eblcr_table, samuel2_eblcr, sizeof(samuel2_eblcr)); | 675 | memcpy(clock_ratio, samuel1_clock_ratio, |
676 | sizeof(samuel1_clock_ratio)); | ||
677 | memcpy(eblcr_table, samuel2_eblcr, | ||
678 | sizeof(samuel2_eblcr)); | ||
610 | break; | 679 | break; |
611 | case 1 ... 15: | 680 | case 1 ... 15: |
681 | longhaul_version = TYPE_LONGHAUL_V2; | ||
612 | if (c->x86_mask < 8) { | 682 | if (c->x86_mask < 8) { |
613 | cpu_model = CPU_SAMUEL2; | 683 | cpu_model = CPU_SAMUEL2; |
614 | cpuname = "C3 'Samuel 2' [C5B]"; | 684 | cpuname = "C3 'Samuel 2' [C5B]"; |
@@ -616,8 +686,10 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy) | |||
616 | cpu_model = CPU_EZRA; | 686 | cpu_model = CPU_EZRA; |
617 | cpuname = "C3 'Ezra' [C5C]"; | 687 | cpuname = "C3 'Ezra' [C5C]"; |
618 | } | 688 | } |
619 | memcpy (clock_ratio, ezra_clock_ratio, sizeof(ezra_clock_ratio)); | 689 | memcpy(clock_ratio, ezra_clock_ratio, |
620 | memcpy (eblcr_table, ezra_eblcr, sizeof(ezra_eblcr)); | 690 | sizeof(ezra_clock_ratio)); |
691 | memcpy(eblcr_table, ezra_eblcr, | ||
692 | sizeof(ezra_eblcr)); | ||
621 | break; | 693 | break; |
622 | } | 694 | } |
623 | break; | 695 | break; |
@@ -632,24 +704,24 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy) | |||
632 | break; | 704 | break; |
633 | 705 | ||
634 | case 9: | 706 | case 9: |
635 | cpu_model = CPU_NEHEMIAH; | ||
636 | longhaul_version = TYPE_POWERSAVER; | 707 | longhaul_version = TYPE_POWERSAVER; |
637 | numscales=32; | 708 | numscales = 32; |
709 | memcpy(clock_ratio, | ||
710 | nehemiah_clock_ratio, | ||
711 | sizeof(nehemiah_clock_ratio)); | ||
712 | memcpy(eblcr_table, nehemiah_eblcr, sizeof(nehemiah_eblcr)); | ||
638 | switch (c->x86_mask) { | 713 | switch (c->x86_mask) { |
639 | case 0 ... 1: | 714 | case 0 ... 1: |
640 | cpuname = "C3 'Nehemiah A' [C5N]"; | 715 | cpu_model = CPU_NEHEMIAH; |
641 | memcpy (clock_ratio, nehemiah_a_clock_ratio, sizeof(nehemiah_a_clock_ratio)); | 716 | cpuname = "C3 'Nehemiah A' [C5XLOE]"; |
642 | memcpy (eblcr_table, nehemiah_a_eblcr, sizeof(nehemiah_a_eblcr)); | ||
643 | break; | 717 | break; |
644 | case 2 ... 4: | 718 | case 2 ... 4: |
645 | cpuname = "C3 'Nehemiah B' [C5N]"; | 719 | cpu_model = CPU_NEHEMIAH; |
646 | memcpy (clock_ratio, nehemiah_b_clock_ratio, sizeof(nehemiah_b_clock_ratio)); | 720 | cpuname = "C3 'Nehemiah B' [C5XLOH]"; |
647 | memcpy (eblcr_table, nehemiah_b_eblcr, sizeof(nehemiah_b_eblcr)); | ||
648 | break; | 721 | break; |
649 | case 5 ... 15: | 722 | case 5 ... 15: |
650 | cpuname = "C3 'Nehemiah C' [C5N]"; | 723 | cpu_model = CPU_NEHEMIAH_C; |
651 | memcpy (clock_ratio, nehemiah_c_clock_ratio, sizeof(nehemiah_c_clock_ratio)); | 724 | cpuname = "C3 'Nehemiah C' [C5P]"; |
652 | memcpy (eblcr_table, nehemiah_c_eblcr, sizeof(nehemiah_c_eblcr)); | ||
653 | break; | 725 | break; |
654 | } | 726 | } |
655 | break; | 727 | break; |
@@ -658,6 +730,13 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy) | |||
658 | cpuname = "Unknown"; | 730 | cpuname = "Unknown"; |
659 | break; | 731 | break; |
660 | } | 732 | } |
733 | /* Check Longhaul ver. 2 */ | ||
734 | if (longhaul_version == TYPE_LONGHAUL_V2) { | ||
735 | rdmsr(MSR_VIA_LONGHAUL, lo, hi); | ||
736 | if (lo == 0 && hi == 0) | ||
737 | /* Looks like MSR isn't present */ | ||
738 | longhaul_version = TYPE_LONGHAUL_V1; | ||
739 | } | ||
661 | 740 | ||
662 | printk (KERN_INFO PFX "VIA %s CPU detected. ", cpuname); | 741 | printk (KERN_INFO PFX "VIA %s CPU detected. ", cpuname); |
663 | switch (longhaul_version) { | 742 | switch (longhaul_version) { |
@@ -670,15 +749,18 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy) | |||
670 | break; | 749 | break; |
671 | }; | 750 | }; |
672 | 751 | ||
752 | /* Doesn't hurt */ | ||
753 | vt8235_present = longhaul_setup_vt8235(); | ||
754 | |||
673 | /* Find ACPI data for processor */ | 755 | /* Find ACPI data for processor */ |
674 | acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, | 756 | acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, |
675 | &longhaul_walk_callback, NULL, (void *)&pr); | 757 | ACPI_UINT32_MAX, &longhaul_walk_callback, |
758 | NULL, (void *)&pr); | ||
676 | 759 | ||
677 | /* Check ACPI support for C3 state */ | 760 | /* Check ACPI support for C3 state */ |
678 | if ((pr != NULL) && (longhaul_version == TYPE_POWERSAVER)) { | 761 | if (pr != NULL && longhaul_version != TYPE_LONGHAUL_V1) { |
679 | cx = &pr->power.states[ACPI_STATE_C3]; | 762 | cx = &pr->power.states[ACPI_STATE_C3]; |
680 | if (cx->address > 0 && | 763 | if (cx->address > 0 && cx->latency <= 1000) { |
681 | (cx->latency <= 1000 || ignore_latency != 0) ) { | ||
682 | longhaul_flags |= USE_ACPI_C3; | 764 | longhaul_flags |= USE_ACPI_C3; |
683 | goto print_support_type; | 765 | goto print_support_type; |
684 | } | 766 | } |
@@ -688,8 +770,11 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy) | |||
688 | longhaul_flags |= USE_NORTHBRIDGE; | 770 | longhaul_flags |= USE_NORTHBRIDGE; |
689 | goto print_support_type; | 771 | goto print_support_type; |
690 | } | 772 | } |
691 | 773 | /* Use VT8235 southbridge if present */ | |
692 | /* No ACPI C3 or we can't use it */ | 774 | if (longhaul_version == TYPE_POWERSAVER && vt8235_present) { |
775 | longhaul_flags |= USE_VT8235; | ||
776 | goto print_support_type; | ||
777 | } | ||
693 | /* Check ACPI support for bus master arbiter disable */ | 778 | /* Check ACPI support for bus master arbiter disable */ |
694 | if ((pr == NULL) || !(pr->flags.bm_control)) { | 779 | if ((pr == NULL) || !(pr->flags.bm_control)) { |
695 | printk(KERN_ERR PFX | 780 | printk(KERN_ERR PFX |
@@ -698,18 +783,18 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy) | |||
698 | } | 783 | } |
699 | 784 | ||
700 | print_support_type: | 785 | print_support_type: |
701 | if (!(longhaul_flags & USE_NORTHBRIDGE)) { | 786 | if (longhaul_flags & USE_NORTHBRIDGE) |
702 | printk (KERN_INFO PFX "Using ACPI support.\n"); | ||
703 | } else { | ||
704 | printk (KERN_INFO PFX "Using northbridge support.\n"); | 787 | printk (KERN_INFO PFX "Using northbridge support.\n"); |
705 | } | 788 | else if (longhaul_flags & USE_VT8235) |
789 | printk (KERN_INFO PFX "Using VT8235 support.\n"); | ||
790 | else | ||
791 | printk (KERN_INFO PFX "Using ACPI support.\n"); | ||
706 | 792 | ||
707 | ret = longhaul_get_ranges(); | 793 | ret = longhaul_get_ranges(); |
708 | if (ret != 0) | 794 | if (ret != 0) |
709 | return ret; | 795 | return ret; |
710 | 796 | ||
711 | if ((longhaul_version==TYPE_LONGHAUL_V2 || longhaul_version==TYPE_POWERSAVER) && | 797 | if ((longhaul_version != TYPE_LONGHAUL_V1) && (scale_voltage != 0)) |
712 | (scale_voltage != 0)) | ||
713 | longhaul_setup_voltagescaling(); | 798 | longhaul_setup_voltagescaling(); |
714 | 799 | ||
715 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | 800 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; |
@@ -797,8 +882,6 @@ static void __exit longhaul_exit(void) | |||
797 | 882 | ||
798 | module_param (scale_voltage, int, 0644); | 883 | module_param (scale_voltage, int, 0644); |
799 | MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor"); | 884 | MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor"); |
800 | module_param(ignore_latency, int, 0644); | ||
801 | MODULE_PARM_DESC(ignore_latency, "Skip ACPI C3 latency test"); | ||
802 | 885 | ||
803 | MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>"); | 886 | MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>"); |
804 | MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors."); | 887 | MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors."); |
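The reworked guess_fsb() above no longer compares against a single rounded value; for each candidate bus clock (66.6, 100, 133.3 and 200 MHz, stored in tenths of MHz) it computes the core speed that multiplier would give and accepts the candidate if the measured cpu_khz falls inside a small rounding window. A minimal stand-alone sketch of that arithmetic follows; it is not part of the patch and the sample cpu_khz/multiplier values are invented.

/* Stand-alone sketch of the new guess_fsb() arithmetic; sample values
 * are invented for demonstration. */
#include <stdio.h>

#define ROUNDING 0xf

static int guess_fsb(int speed, int mult)   /* speed in MHz, mult in tenths */
{
        int speeds[] = { 666, 1000, 1333, 2000 };   /* FSB in tenths of MHz */
        int i, f_max, f_min;

        for (i = 0; i < 4; i++) {
                f_max = ((speeds[i] * mult) + 50) / 100;  /* expected core MHz */
                f_max += (ROUNDING / 2);
                f_min = f_max - ROUNDING;
                if (speed <= f_max && speed >= f_min)
                        return speeds[i] / 10;
        }
        return 0;   /* no candidate matched */
}

int main(void)
{
        int cpu_khz = 1200000;  /* hypothetical 1.2 GHz C3 */
        int mult = 90;          /* 9.0x, encoded in tenths as in the driver */

        printf("guessed FSB: %d MHz\n", guess_fsb(cpu_khz / 1000, mult));
        return 0;
}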
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.h b/arch/i386/kernel/cpu/cpufreq/longhaul.h
index bc4682aad69b..bb0a04b1d1ab 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.h
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.h
@@ -235,84 +235,14 @@ static int __initdata ezrat_eblcr[32] = { | |||
235 | /* | 235 | /* |
236 | * VIA C3 Nehemiah */ | 236 | * VIA C3 Nehemiah */ |
237 | 237 | ||
238 | static int __initdata nehemiah_a_clock_ratio[32] = { | 238 | static int __initdata nehemiah_clock_ratio[32] = { |
239 | 100, /* 0000 -> 10.0x */ | 239 | 100, /* 0000 -> 10.0x */ |
240 | 160, /* 0001 -> 16.0x */ | 240 | 160, /* 0001 -> 16.0x */ |
241 | -1, /* 0010 -> RESERVED */ | 241 | 40, /* 0010 -> 4.0x */ |
242 | 90, /* 0011 -> 9.0x */ | ||
243 | 95, /* 0100 -> 9.5x */ | ||
244 | -1, /* 0101 -> RESERVED */ | ||
245 | -1, /* 0110 -> RESERVED */ | ||
246 | 55, /* 0111 -> 5.5x */ | ||
247 | 60, /* 1000 -> 6.0x */ | ||
248 | 70, /* 1001 -> 7.0x */ | ||
249 | 80, /* 1010 -> 8.0x */ | ||
250 | 50, /* 1011 -> 5.0x */ | ||
251 | 65, /* 1100 -> 6.5x */ | ||
252 | 75, /* 1101 -> 7.5x */ | ||
253 | 85, /* 1110 -> 8.5x */ | ||
254 | 120, /* 1111 -> 12.0x */ | ||
255 | 100, /* 0000 -> 10.0x */ | ||
256 | -1, /* 0001 -> RESERVED */ | ||
257 | 120, /* 0010 -> 12.0x */ | ||
258 | 90, /* 0011 -> 9.0x */ | ||
259 | 105, /* 0100 -> 10.5x */ | ||
260 | 115, /* 0101 -> 11.5x */ | ||
261 | 125, /* 0110 -> 12.5x */ | ||
262 | 135, /* 0111 -> 13.5x */ | ||
263 | 140, /* 1000 -> 14.0x */ | ||
264 | 150, /* 1001 -> 15.0x */ | ||
265 | 160, /* 1010 -> 16.0x */ | ||
266 | 130, /* 1011 -> 13.0x */ | ||
267 | 145, /* 1100 -> 14.5x */ | ||
268 | 155, /* 1101 -> 15.5x */ | ||
269 | -1, /* 1110 -> RESERVED (13.0x) */ | ||
270 | 120, /* 1111 -> 12.0x */ | ||
271 | }; | ||
272 | |||
273 | static int __initdata nehemiah_b_clock_ratio[32] = { | ||
274 | 100, /* 0000 -> 10.0x */ | ||
275 | 160, /* 0001 -> 16.0x */ | ||
276 | -1, /* 0010 -> RESERVED */ | ||
277 | 90, /* 0011 -> 9.0x */ | ||
278 | 95, /* 0100 -> 9.5x */ | ||
279 | -1, /* 0101 -> RESERVED */ | ||
280 | -1, /* 0110 -> RESERVED */ | ||
281 | 55, /* 0111 -> 5.5x */ | ||
282 | 60, /* 1000 -> 6.0x */ | ||
283 | 70, /* 1001 -> 7.0x */ | ||
284 | 80, /* 1010 -> 8.0x */ | ||
285 | 50, /* 1011 -> 5.0x */ | ||
286 | 65, /* 1100 -> 6.5x */ | ||
287 | 75, /* 1101 -> 7.5x */ | ||
288 | 85, /* 1110 -> 8.5x */ | ||
289 | 120, /* 1111 -> 12.0x */ | ||
290 | 100, /* 0000 -> 10.0x */ | ||
291 | 110, /* 0001 -> 11.0x */ | ||
292 | 120, /* 0010 -> 12.0x */ | ||
293 | 90, /* 0011 -> 9.0x */ | ||
294 | 105, /* 0100 -> 10.5x */ | ||
295 | 115, /* 0101 -> 11.5x */ | ||
296 | 125, /* 0110 -> 12.5x */ | ||
297 | 135, /* 0111 -> 13.5x */ | ||
298 | 140, /* 1000 -> 14.0x */ | ||
299 | 150, /* 1001 -> 15.0x */ | ||
300 | 160, /* 1010 -> 16.0x */ | ||
301 | 130, /* 1011 -> 13.0x */ | ||
302 | 145, /* 1100 -> 14.5x */ | ||
303 | 155, /* 1101 -> 15.5x */ | ||
304 | -1, /* 1110 -> RESERVED (13.0x) */ | ||
305 | 120, /* 1111 -> 12.0x */ | ||
306 | }; | ||
307 | |||
308 | static int __initdata nehemiah_c_clock_ratio[32] = { | ||
309 | 100, /* 0000 -> 10.0x */ | ||
310 | 160, /* 0001 -> 16.0x */ | ||
311 | 40, /* 0010 -> RESERVED */ | ||
312 | 90, /* 0011 -> 9.0x */ | 242 | 90, /* 0011 -> 9.0x */ |
313 | 95, /* 0100 -> 9.5x */ | 243 | 95, /* 0100 -> 9.5x */ |
314 | -1, /* 0101 -> RESERVED */ | 244 | -1, /* 0101 -> RESERVED */ |
315 | 45, /* 0110 -> RESERVED */ | 245 | 45, /* 0110 -> 4.5x */ |
316 | 55, /* 0111 -> 5.5x */ | 246 | 55, /* 0111 -> 5.5x */ |
317 | 60, /* 1000 -> 6.0x */ | 247 | 60, /* 1000 -> 6.0x */ |
318 | 70, /* 1001 -> 7.0x */ | 248 | 70, /* 1001 -> 7.0x */ |
@@ -340,84 +270,14 @@ static int __initdata nehemiah_c_clock_ratio[32] = { | |||
340 | 120, /* 1111 -> 12.0x */ | 270 | 120, /* 1111 -> 12.0x */ |
341 | }; | 271 | }; |
342 | 272 | ||
343 | static int __initdata nehemiah_a_eblcr[32] = { | 273 | static int __initdata nehemiah_eblcr[32] = { |
344 | 50, /* 0000 -> 5.0x */ | ||
345 | 160, /* 0001 -> 16.0x */ | ||
346 | -1, /* 0010 -> RESERVED */ | ||
347 | 100, /* 0011 -> 10.0x */ | ||
348 | 55, /* 0100 -> 5.5x */ | ||
349 | -1, /* 0101 -> RESERVED */ | ||
350 | -1, /* 0110 -> RESERVED */ | ||
351 | 95, /* 0111 -> 9.5x */ | ||
352 | 90, /* 1000 -> 9.0x */ | ||
353 | 70, /* 1001 -> 7.0x */ | ||
354 | 80, /* 1010 -> 8.0x */ | ||
355 | 60, /* 1011 -> 6.0x */ | ||
356 | 120, /* 1100 -> 12.0x */ | ||
357 | 75, /* 1101 -> 7.5x */ | ||
358 | 85, /* 1110 -> 8.5x */ | ||
359 | 65, /* 1111 -> 6.5x */ | ||
360 | 90, /* 0000 -> 9.0x */ | ||
361 | -1, /* 0001 -> RESERVED */ | ||
362 | 120, /* 0010 -> 12.0x */ | ||
363 | 100, /* 0011 -> 10.0x */ | ||
364 | 135, /* 0100 -> 13.5x */ | ||
365 | 115, /* 0101 -> 11.5x */ | ||
366 | 125, /* 0110 -> 12.5x */ | ||
367 | 105, /* 0111 -> 10.5x */ | ||
368 | 130, /* 1000 -> 13.0x */ | ||
369 | 150, /* 1001 -> 15.0x */ | ||
370 | 160, /* 1010 -> 16.0x */ | ||
371 | 140, /* 1011 -> 14.0x */ | ||
372 | 120, /* 1100 -> 12.0x */ | ||
373 | 155, /* 1101 -> 15.5x */ | ||
374 | -1, /* 1110 -> RESERVED (13.0x) */ | ||
375 | 145 /* 1111 -> 14.5x */ | ||
376 | /* end of table */ | ||
377 | }; | ||
378 | static int __initdata nehemiah_b_eblcr[32] = { | ||
379 | 50, /* 0000 -> 5.0x */ | ||
380 | 160, /* 0001 -> 16.0x */ | ||
381 | -1, /* 0010 -> RESERVED */ | ||
382 | 100, /* 0011 -> 10.0x */ | ||
383 | 55, /* 0100 -> 5.5x */ | ||
384 | -1, /* 0101 -> RESERVED */ | ||
385 | -1, /* 0110 -> RESERVED */ | ||
386 | 95, /* 0111 -> 9.5x */ | ||
387 | 90, /* 1000 -> 9.0x */ | ||
388 | 70, /* 1001 -> 7.0x */ | ||
389 | 80, /* 1010 -> 8.0x */ | ||
390 | 60, /* 1011 -> 6.0x */ | ||
391 | 120, /* 1100 -> 12.0x */ | ||
392 | 75, /* 1101 -> 7.5x */ | ||
393 | 85, /* 1110 -> 8.5x */ | ||
394 | 65, /* 1111 -> 6.5x */ | ||
395 | 90, /* 0000 -> 9.0x */ | ||
396 | 110, /* 0001 -> 11.0x */ | ||
397 | 120, /* 0010 -> 12.0x */ | ||
398 | 100, /* 0011 -> 10.0x */ | ||
399 | 135, /* 0100 -> 13.5x */ | ||
400 | 115, /* 0101 -> 11.5x */ | ||
401 | 125, /* 0110 -> 12.5x */ | ||
402 | 105, /* 0111 -> 10.5x */ | ||
403 | 130, /* 1000 -> 13.0x */ | ||
404 | 150, /* 1001 -> 15.0x */ | ||
405 | 160, /* 1010 -> 16.0x */ | ||
406 | 140, /* 1011 -> 14.0x */ | ||
407 | 120, /* 1100 -> 12.0x */ | ||
408 | 155, /* 1101 -> 15.5x */ | ||
409 | -1, /* 1110 -> RESERVED (13.0x) */ | ||
410 | 145 /* 1111 -> 14.5x */ | ||
411 | /* end of table */ | ||
412 | }; | ||
413 | static int __initdata nehemiah_c_eblcr[32] = { | ||
414 | 50, /* 0000 -> 5.0x */ | 274 | 50, /* 0000 -> 5.0x */ |
415 | 160, /* 0001 -> 16.0x */ | 275 | 160, /* 0001 -> 16.0x */ |
416 | 40, /* 0010 -> RESERVED */ | 276 | 40, /* 0010 -> 4.0x */ |
417 | 100, /* 0011 -> 10.0x */ | 277 | 100, /* 0011 -> 10.0x */ |
418 | 55, /* 0100 -> 5.5x */ | 278 | 55, /* 0100 -> 5.5x */ |
419 | -1, /* 0101 -> RESERVED */ | 279 | -1, /* 0101 -> RESERVED */ |
420 | 45, /* 0110 -> RESERVED */ | 280 | 45, /* 0110 -> 4.5x */ |
421 | 95, /* 0111 -> 9.5x */ | 281 | 95, /* 0111 -> 9.5x */ |
422 | 90, /* 1000 -> 9.0x */ | 282 | 90, /* 1000 -> 9.0x */ |
423 | 70, /* 1001 -> 7.0x */ | 283 | 70, /* 1001 -> 7.0x */ |
@@ -443,7 +303,6 @@ static int __initdata nehemiah_c_eblcr[32] = { | |||
443 | 155, /* 1101 -> 15.5x */ | 303 | 155, /* 1101 -> 15.5x */ |
444 | -1, /* 1110 -> RESERVED (13.0x) */ | 304 | -1, /* 1110 -> RESERVED (13.0x) */ |
445 | 145 /* 1111 -> 14.5x */ | 305 | 145 /* 1111 -> 14.5x */ |
446 | /* end of table */ | ||
447 | }; | 306 | }; |
448 | 307 | ||
449 | /* | 308 | /* |
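The merged nehemiah_clock_ratio[]/nehemiah_eblcr[] tables above map a 5-bit ratio index to a multiplier expressed in tenths (so 95 means 9.5x), with -1 marking reserved encodings. A small sketch of that lookup follows; it is not part of the patch and reproduces only the first eight table entries visible above.

/* Illustration only: decoding a ratio index through the table format
 * used above (multipliers in tenths, -1 = reserved).  Only the first
 * eight entries of nehemiah_clock_ratio[] are reproduced here. */
#include <stdio.h>

static const int ratio_table[8] = { 100, 160, 40, 90, 95, -1, 45, 55 };

int main(void)
{
        unsigned int idx;

        for (idx = 0; idx < 8; idx++) {
                if (ratio_table[idx] == -1)
                        printf("index %u: reserved\n", idx);
                else
                        printf("index %u: %d.%dx\n", idx,
                               ratio_table[idx] / 10, ratio_table[idx] % 10);
        }
        return 0;
}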
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index 2d6491672559..fe3b67005ebb 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -1289,7 +1289,11 @@ static unsigned int powernowk8_get (unsigned int cpu) | |||
1289 | if (query_current_values_with_pending_wait(data)) | 1289 | if (query_current_values_with_pending_wait(data)) |
1290 | goto out; | 1290 | goto out; |
1291 | 1291 | ||
1292 | khz = find_khz_freq_from_fid(data->currfid); | 1292 | if (cpu_family == CPU_HW_PSTATE) |
1293 | khz = find_khz_freq_from_fiddid(data->currfid, data->currdid); | ||
1294 | else | ||
1295 | khz = find_khz_freq_from_fid(data->currfid); | ||
1296 | |||
1293 | 1297 | ||
1294 | out: | 1298 | out: |
1295 | set_cpus_allowed(current, oldmask); | 1299 | set_cpus_allowed(current, oldmask); |
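The powernowk8_get() fix above reports the current frequency from both the FID and the DID when the CPU uses hardware P-states, instead of from the FID alone. As a rough illustration only (not part of the patch), the sketch below assumes the conventional AMD encodings CoreFreq = (800 + fid * 100) MHz for the software-FID case and CoreFreq = 100 MHz * (fid + 0x10) >> did for the hardware P-state case; the driver's actual find_khz_freq_from_fiddid() helper is not visible in this hunk, so treat both formulas as assumptions.

/* Rough illustration only.  Both frequency formulas below are assumptions;
 * the real helpers are not visible in the hunk above. */
#include <stdio.h>

static unsigned int khz_from_fid(unsigned int fid)
{
        return 100000 * (fid + 8);              /* assumed non-HW-pstate encoding */
}

static unsigned int khz_from_fiddid(unsigned int fid, unsigned int did)
{
        return (100000 * (fid + 0x10)) >> did;  /* assumed HW-pstate encoding */
}

int main(void)
{
        unsigned int fid = 0x2, did = 0x1;      /* invented register values */
        int hw_pstate = 1;                      /* pretend CPU_HW_PSTATE */

        printf("reported: %u kHz\n",
               hw_pstate ? khz_from_fiddid(fid, did) : khz_from_fid(fid));
        return 0;
}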
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 491779af8d55..d155e81b5c97 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -16,7 +16,7 @@ config CPU_FREQ | |||
16 | if CPU_FREQ | 16 | if CPU_FREQ |
17 | 17 | ||
18 | config CPU_FREQ_TABLE | 18 | config CPU_FREQ_TABLE |
19 | def_tristate m | 19 | tristate |
20 | 20 | ||
21 | config CPU_FREQ_DEBUG | 21 | config CPU_FREQ_DEBUG |
22 | bool "Enable CPUfreq debugging" | 22 | bool "Enable CPUfreq debugging" |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index a45cc89e387a..f52facc570f5 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -41,8 +41,67 @@ static struct cpufreq_driver *cpufreq_driver; | |||
41 | static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS]; | 41 | static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS]; |
42 | static DEFINE_SPINLOCK(cpufreq_driver_lock); | 42 | static DEFINE_SPINLOCK(cpufreq_driver_lock); |
43 | 43 | ||
44 | /* | ||
45 | * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure | ||
46 | * all cpufreq/hotplug/workqueue/etc related lock issues. | ||
47 | * | ||
48 | * The rules for this semaphore: | ||
49 | * - Any routine that wants to read from the policy structure will | ||
50 | * do a down_read on this semaphore. | ||
51 | * - Any routine that will write to the policy structure and/or may take away | ||
52 | * the policy altogether (eg. CPU hotplug), will hold this lock in write | ||
53 | * mode before doing so. | ||
54 | * | ||
55 | * Additional rules: | ||
56 | * - All holders of the lock should check to make sure that the CPU they | ||
57 | * are concerned with are online after they get the lock. | ||
58 | * - Governor routines that can be called in cpufreq hotplug path should not | ||
59 | * take this sem as top level hotplug notifier handler takes this. | ||
60 | */ | ||
61 | static DEFINE_PER_CPU(int, policy_cpu); | ||
62 | static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem); | ||
63 | |||
64 | #define lock_policy_rwsem(mode, cpu) \ | ||
65 | int lock_policy_rwsem_##mode \ | ||
66 | (int cpu) \ | ||
67 | { \ | ||
68 | int policy_cpu = per_cpu(policy_cpu, cpu); \ | ||
69 | BUG_ON(policy_cpu == -1); \ | ||
70 | down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \ | ||
71 | if (unlikely(!cpu_online(cpu))) { \ | ||
72 | up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \ | ||
73 | return -1; \ | ||
74 | } \ | ||
75 | \ | ||
76 | return 0; \ | ||
77 | } | ||
78 | |||
79 | lock_policy_rwsem(read, cpu); | ||
80 | EXPORT_SYMBOL_GPL(lock_policy_rwsem_read); | ||
81 | |||
82 | lock_policy_rwsem(write, cpu); | ||
83 | EXPORT_SYMBOL_GPL(lock_policy_rwsem_write); | ||
84 | |||
85 | void unlock_policy_rwsem_read(int cpu) | ||
86 | { | ||
87 | int policy_cpu = per_cpu(policy_cpu, cpu); | ||
88 | BUG_ON(policy_cpu == -1); | ||
89 | up_read(&per_cpu(cpu_policy_rwsem, policy_cpu)); | ||
90 | } | ||
91 | EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read); | ||
92 | |||
93 | void unlock_policy_rwsem_write(int cpu) | ||
94 | { | ||
95 | int policy_cpu = per_cpu(policy_cpu, cpu); | ||
96 | BUG_ON(policy_cpu == -1); | ||
97 | up_write(&per_cpu(cpu_policy_rwsem, policy_cpu)); | ||
98 | } | ||
99 | EXPORT_SYMBOL_GPL(unlock_policy_rwsem_write); | ||
100 | |||
101 | |||
44 | /* internal prototypes */ | 102 | /* internal prototypes */ |
45 | static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event); | 103 | static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event); |
104 | static unsigned int __cpufreq_get(unsigned int cpu); | ||
46 | static void handle_update(struct work_struct *work); | 105 | static void handle_update(struct work_struct *work); |
47 | 106 | ||
48 | /** | 107 | /** |
@@ -415,12 +474,8 @@ static ssize_t store_##file_name \ | |||
415 | if (ret != 1) \ | 474 | if (ret != 1) \ |
416 | return -EINVAL; \ | 475 | return -EINVAL; \ |
417 | \ | 476 | \ |
418 | lock_cpu_hotplug(); \ | ||
419 | mutex_lock(&policy->lock); \ | ||
420 | ret = __cpufreq_set_policy(policy, &new_policy); \ | 477 | ret = __cpufreq_set_policy(policy, &new_policy); \ |
421 | policy->user_policy.object = policy->object; \ | 478 | policy->user_policy.object = policy->object; \ |
422 | mutex_unlock(&policy->lock); \ | ||
423 | unlock_cpu_hotplug(); \ | ||
424 | \ | 479 | \ |
425 | return ret ? ret : count; \ | 480 | return ret ? ret : count; \ |
426 | } | 481 | } |
@@ -434,7 +489,7 @@ store_one(scaling_max_freq,max); | |||
434 | static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy, | 489 | static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy, |
435 | char *buf) | 490 | char *buf) |
436 | { | 491 | { |
437 | unsigned int cur_freq = cpufreq_get(policy->cpu); | 492 | unsigned int cur_freq = __cpufreq_get(policy->cpu); |
438 | if (!cur_freq) | 493 | if (!cur_freq) |
439 | return sprintf(buf, "<unknown>"); | 494 | return sprintf(buf, "<unknown>"); |
440 | return sprintf(buf, "%u\n", cur_freq); | 495 | return sprintf(buf, "%u\n", cur_freq); |
@@ -479,18 +534,12 @@ static ssize_t store_scaling_governor (struct cpufreq_policy * policy, | |||
479 | &new_policy.governor)) | 534 | &new_policy.governor)) |
480 | return -EINVAL; | 535 | return -EINVAL; |
481 | 536 | ||
482 | lock_cpu_hotplug(); | ||
483 | |||
484 | /* Do not use cpufreq_set_policy here or the user_policy.max | 537 | /* Do not use cpufreq_set_policy here or the user_policy.max |
485 | will be wrongly overridden */ | 538 | will be wrongly overridden */ |
486 | mutex_lock(&policy->lock); | ||
487 | ret = __cpufreq_set_policy(policy, &new_policy); | 539 | ret = __cpufreq_set_policy(policy, &new_policy); |
488 | 540 | ||
489 | policy->user_policy.policy = policy->policy; | 541 | policy->user_policy.policy = policy->policy; |
490 | policy->user_policy.governor = policy->governor; | 542 | policy->user_policy.governor = policy->governor; |
491 | mutex_unlock(&policy->lock); | ||
492 | |||
493 | unlock_cpu_hotplug(); | ||
494 | 543 | ||
495 | if (ret) | 544 | if (ret) |
496 | return ret; | 545 | return ret; |
@@ -595,11 +644,17 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf) | |||
595 | policy = cpufreq_cpu_get(policy->cpu); | 644 | policy = cpufreq_cpu_get(policy->cpu); |
596 | if (!policy) | 645 | if (!policy) |
597 | return -EINVAL; | 646 | return -EINVAL; |
647 | |||
648 | if (lock_policy_rwsem_read(policy->cpu) < 0) | ||
649 | return -EINVAL; | ||
650 | |||
598 | if (fattr->show) | 651 | if (fattr->show) |
599 | ret = fattr->show(policy, buf); | 652 | ret = fattr->show(policy, buf); |
600 | else | 653 | else |
601 | ret = -EIO; | 654 | ret = -EIO; |
602 | 655 | ||
656 | unlock_policy_rwsem_read(policy->cpu); | ||
657 | |||
603 | cpufreq_cpu_put(policy); | 658 | cpufreq_cpu_put(policy); |
604 | return ret; | 659 | return ret; |
605 | } | 660 | } |
@@ -613,11 +668,17 @@ static ssize_t store(struct kobject * kobj, struct attribute * attr, | |||
613 | policy = cpufreq_cpu_get(policy->cpu); | 668 | policy = cpufreq_cpu_get(policy->cpu); |
614 | if (!policy) | 669 | if (!policy) |
615 | return -EINVAL; | 670 | return -EINVAL; |
671 | |||
672 | if (lock_policy_rwsem_write(policy->cpu) < 0) | ||
673 | return -EINVAL; | ||
674 | |||
616 | if (fattr->store) | 675 | if (fattr->store) |
617 | ret = fattr->store(policy, buf, count); | 676 | ret = fattr->store(policy, buf, count); |
618 | else | 677 | else |
619 | ret = -EIO; | 678 | ret = -EIO; |
620 | 679 | ||
680 | unlock_policy_rwsem_write(policy->cpu); | ||
681 | |||
621 | cpufreq_cpu_put(policy); | 682 | cpufreq_cpu_put(policy); |
622 | return ret; | 683 | return ret; |
623 | } | 684 | } |
@@ -691,8 +752,10 @@ static int cpufreq_add_dev (struct sys_device * sys_dev) | |||
691 | policy->cpu = cpu; | 752 | policy->cpu = cpu; |
692 | policy->cpus = cpumask_of_cpu(cpu); | 753 | policy->cpus = cpumask_of_cpu(cpu); |
693 | 754 | ||
694 | mutex_init(&policy->lock); | 755 | /* Initially set CPU itself as the policy_cpu */ |
695 | mutex_lock(&policy->lock); | 756 | per_cpu(policy_cpu, cpu) = cpu; |
757 | lock_policy_rwsem_write(cpu); | ||
758 | |||
696 | init_completion(&policy->kobj_unregister); | 759 | init_completion(&policy->kobj_unregister); |
697 | INIT_WORK(&policy->update, handle_update); | 760 | INIT_WORK(&policy->update, handle_update); |
698 | 761 | ||
@@ -702,7 +765,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev) | |||
702 | ret = cpufreq_driver->init(policy); | 765 | ret = cpufreq_driver->init(policy); |
703 | if (ret) { | 766 | if (ret) { |
704 | dprintk("initialization failed\n"); | 767 | dprintk("initialization failed\n"); |
705 | mutex_unlock(&policy->lock); | 768 | unlock_policy_rwsem_write(cpu); |
706 | goto err_out; | 769 | goto err_out; |
707 | } | 770 | } |
708 | 771 | ||
@@ -716,6 +779,14 @@ static int cpufreq_add_dev (struct sys_device * sys_dev) | |||
716 | */ | 779 | */ |
717 | managed_policy = cpufreq_cpu_get(j); | 780 | managed_policy = cpufreq_cpu_get(j); |
718 | if (unlikely(managed_policy)) { | 781 | if (unlikely(managed_policy)) { |
782 | |||
783 | /* Set proper policy_cpu */ | ||
784 | unlock_policy_rwsem_write(cpu); | ||
785 | per_cpu(policy_cpu, cpu) = managed_policy->cpu; | ||
786 | |||
787 | if (lock_policy_rwsem_write(cpu) < 0) | ||
788 | goto err_out_driver_exit; | ||
789 | |||
719 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | 790 | spin_lock_irqsave(&cpufreq_driver_lock, flags); |
720 | managed_policy->cpus = policy->cpus; | 791 | managed_policy->cpus = policy->cpus; |
721 | cpufreq_cpu_data[cpu] = managed_policy; | 792 | cpufreq_cpu_data[cpu] = managed_policy; |
@@ -726,13 +797,13 @@ static int cpufreq_add_dev (struct sys_device * sys_dev) | |||
726 | &managed_policy->kobj, | 797 | &managed_policy->kobj, |
727 | "cpufreq"); | 798 | "cpufreq"); |
728 | if (ret) { | 799 | if (ret) { |
729 | mutex_unlock(&policy->lock); | 800 | unlock_policy_rwsem_write(cpu); |
730 | goto err_out_driver_exit; | 801 | goto err_out_driver_exit; |
731 | } | 802 | } |
732 | 803 | ||
733 | cpufreq_debug_enable_ratelimit(); | 804 | cpufreq_debug_enable_ratelimit(); |
734 | mutex_unlock(&policy->lock); | ||
735 | ret = 0; | 805 | ret = 0; |
806 | unlock_policy_rwsem_write(cpu); | ||
736 | goto err_out_driver_exit; /* call driver->exit() */ | 807 | goto err_out_driver_exit; /* call driver->exit() */ |
737 | } | 808 | } |
738 | } | 809 | } |
@@ -746,7 +817,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev) | |||
746 | 817 | ||
747 | ret = kobject_register(&policy->kobj); | 818 | ret = kobject_register(&policy->kobj); |
748 | if (ret) { | 819 | if (ret) { |
749 | mutex_unlock(&policy->lock); | 820 | unlock_policy_rwsem_write(cpu); |
750 | goto err_out_driver_exit; | 821 | goto err_out_driver_exit; |
751 | } | 822 | } |
752 | /* set up files for this cpu device */ | 823 | /* set up files for this cpu device */ |
@@ -761,8 +832,10 @@ static int cpufreq_add_dev (struct sys_device * sys_dev) | |||
761 | sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); | 832 | sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); |
762 | 833 | ||
763 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | 834 | spin_lock_irqsave(&cpufreq_driver_lock, flags); |
764 | for_each_cpu_mask(j, policy->cpus) | 835 | for_each_cpu_mask(j, policy->cpus) { |
765 | cpufreq_cpu_data[j] = policy; | 836 | cpufreq_cpu_data[j] = policy; |
837 | per_cpu(policy_cpu, j) = policy->cpu; | ||
838 | } | ||
766 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | 839 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); |
767 | 840 | ||
768 | /* symlink affected CPUs */ | 841 | /* symlink affected CPUs */ |
@@ -778,14 +851,14 @@ static int cpufreq_add_dev (struct sys_device * sys_dev) | |||
778 | ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj, | 851 | ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj, |
779 | "cpufreq"); | 852 | "cpufreq"); |
780 | if (ret) { | 853 | if (ret) { |
781 | mutex_unlock(&policy->lock); | 854 | unlock_policy_rwsem_write(cpu); |
782 | goto err_out_unregister; | 855 | goto err_out_unregister; |
783 | } | 856 | } |
784 | } | 857 | } |
785 | 858 | ||
786 | policy->governor = NULL; /* to assure that the starting sequence is | 859 | policy->governor = NULL; /* to assure that the starting sequence is |
787 | * run in cpufreq_set_policy */ | 860 | * run in cpufreq_set_policy */ |
788 | mutex_unlock(&policy->lock); | 861 | unlock_policy_rwsem_write(cpu); |
789 | 862 | ||
790 | /* set default policy */ | 863 | /* set default policy */ |
791 | ret = cpufreq_set_policy(&new_policy); | 864 | ret = cpufreq_set_policy(&new_policy); |
@@ -826,11 +899,13 @@ module_out: | |||
826 | 899 | ||
827 | 900 | ||
828 | /** | 901 | /** |
829 | * cpufreq_remove_dev - remove a CPU device | 902 | * __cpufreq_remove_dev - remove a CPU device |
830 | * | 903 | * |
831 | * Removes the cpufreq interface for a CPU device. | 904 | * Removes the cpufreq interface for a CPU device. |
905 | * The caller must already hold the policy rwsem in write mode for this CPU. | ||
906 | * This routine releases the rwsem before returning. | ||
832 | */ | 907 | */ |
833 | static int cpufreq_remove_dev (struct sys_device * sys_dev) | 908 | static int __cpufreq_remove_dev (struct sys_device * sys_dev) |
834 | { | 909 | { |
835 | unsigned int cpu = sys_dev->id; | 910 | unsigned int cpu = sys_dev->id; |
836 | unsigned long flags; | 911 | unsigned long flags; |
@@ -849,6 +924,7 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev) | |||
849 | if (!data) { | 924 | if (!data) { |
850 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | 925 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); |
851 | cpufreq_debug_enable_ratelimit(); | 926 | cpufreq_debug_enable_ratelimit(); |
927 | unlock_policy_rwsem_write(cpu); | ||
852 | return -EINVAL; | 928 | return -EINVAL; |
853 | } | 929 | } |
854 | cpufreq_cpu_data[cpu] = NULL; | 930 | cpufreq_cpu_data[cpu] = NULL; |
@@ -865,6 +941,7 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev) | |||
865 | sysfs_remove_link(&sys_dev->kobj, "cpufreq"); | 941 | sysfs_remove_link(&sys_dev->kobj, "cpufreq"); |
866 | cpufreq_cpu_put(data); | 942 | cpufreq_cpu_put(data); |
867 | cpufreq_debug_enable_ratelimit(); | 943 | cpufreq_debug_enable_ratelimit(); |
944 | unlock_policy_rwsem_write(cpu); | ||
868 | return 0; | 945 | return 0; |
869 | } | 946 | } |
870 | #endif | 947 | #endif |
@@ -873,6 +950,7 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev) | |||
873 | if (!kobject_get(&data->kobj)) { | 950 | if (!kobject_get(&data->kobj)) { |
874 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | 951 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); |
875 | cpufreq_debug_enable_ratelimit(); | 952 | cpufreq_debug_enable_ratelimit(); |
953 | unlock_policy_rwsem_write(cpu); | ||
876 | return -EFAULT; | 954 | return -EFAULT; |
877 | } | 955 | } |
878 | 956 | ||
@@ -906,10 +984,10 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev) | |||
906 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | 984 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); |
907 | #endif | 985 | #endif |
908 | 986 | ||
909 | mutex_lock(&data->lock); | ||
910 | if (cpufreq_driver->target) | 987 | if (cpufreq_driver->target) |
911 | __cpufreq_governor(data, CPUFREQ_GOV_STOP); | 988 | __cpufreq_governor(data, CPUFREQ_GOV_STOP); |
912 | mutex_unlock(&data->lock); | 989 | |
990 | unlock_policy_rwsem_write(cpu); | ||
913 | 991 | ||
914 | kobject_unregister(&data->kobj); | 992 | kobject_unregister(&data->kobj); |
915 | 993 | ||
@@ -933,6 +1011,18 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev) | |||
933 | } | 1011 | } |
934 | 1012 | ||
935 | 1013 | ||
1014 | static int cpufreq_remove_dev (struct sys_device * sys_dev) | ||
1015 | { | ||
1016 | unsigned int cpu = sys_dev->id; | ||
1017 | int retval; | ||
1018 | if (unlikely(lock_policy_rwsem_write(cpu))) | ||
1019 | BUG(); | ||
1020 | |||
1021 | retval = __cpufreq_remove_dev(sys_dev); | ||
1022 | return retval; | ||
1023 | } | ||
1024 | |||
1025 | |||
936 | static void handle_update(struct work_struct *work) | 1026 | static void handle_update(struct work_struct *work) |
937 | { | 1027 | { |
938 | struct cpufreq_policy *policy = | 1028 | struct cpufreq_policy *policy = |
@@ -980,9 +1070,12 @@ unsigned int cpufreq_quick_get(unsigned int cpu) | |||
980 | unsigned int ret_freq = 0; | 1070 | unsigned int ret_freq = 0; |
981 | 1071 | ||
982 | if (policy) { | 1072 | if (policy) { |
983 | mutex_lock(&policy->lock); | 1073 | if (unlikely(lock_policy_rwsem_read(cpu))) |
1074 | return ret_freq; | ||
1075 | |||
984 | ret_freq = policy->cur; | 1076 | ret_freq = policy->cur; |
985 | mutex_unlock(&policy->lock); | 1077 | |
1078 | unlock_policy_rwsem_read(cpu); | ||
986 | cpufreq_cpu_put(policy); | 1079 | cpufreq_cpu_put(policy); |
987 | } | 1080 | } |
988 | 1081 | ||
@@ -991,24 +1084,13 @@ unsigned int cpufreq_quick_get(unsigned int cpu) | |||
991 | EXPORT_SYMBOL(cpufreq_quick_get); | 1084 | EXPORT_SYMBOL(cpufreq_quick_get); |
992 | 1085 | ||
993 | 1086 | ||
994 | /** | 1087 | static unsigned int __cpufreq_get(unsigned int cpu) |
995 | * cpufreq_get - get the current CPU frequency (in kHz) | ||
996 | * @cpu: CPU number | ||
997 | * | ||
998 | * Get the CPU current (static) CPU frequency | ||
999 | */ | ||
1000 | unsigned int cpufreq_get(unsigned int cpu) | ||
1001 | { | 1088 | { |
1002 | struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); | 1089 | struct cpufreq_policy *policy = cpufreq_cpu_data[cpu]; |
1003 | unsigned int ret_freq = 0; | 1090 | unsigned int ret_freq = 0; |
1004 | 1091 | ||
1005 | if (!policy) | ||
1006 | return 0; | ||
1007 | |||
1008 | if (!cpufreq_driver->get) | 1092 | if (!cpufreq_driver->get) |
1009 | goto out; | 1093 | return (ret_freq); |
1010 | |||
1011 | mutex_lock(&policy->lock); | ||
1012 | 1094 | ||
1013 | ret_freq = cpufreq_driver->get(cpu); | 1095 | ret_freq = cpufreq_driver->get(cpu); |
1014 | 1096 | ||
@@ -1022,11 +1104,33 @@ unsigned int cpufreq_get(unsigned int cpu) | |||
1022 | } | 1104 | } |
1023 | } | 1105 | } |
1024 | 1106 | ||
1025 | mutex_unlock(&policy->lock); | 1107 | return (ret_freq); |
1108 | } | ||
1026 | 1109 | ||
1027 | out: | 1110 | /** |
1028 | cpufreq_cpu_put(policy); | 1111 | * cpufreq_get - get the current CPU frequency (in kHz) |
1112 | * @cpu: CPU number | ||
1113 | * | ||
1114 | * Get the current (static) CPU frequency | ||
1115 | */ | ||
1116 | unsigned int cpufreq_get(unsigned int cpu) | ||
1117 | { | ||
1118 | unsigned int ret_freq = 0; | ||
1119 | struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); | ||
1120 | |||
1121 | if (!policy) | ||
1122 | goto out; | ||
1123 | |||
1124 | if (unlikely(lock_policy_rwsem_read(cpu))) | ||
1125 | goto out_policy; | ||
1126 | |||
1127 | ret_freq = __cpufreq_get(cpu); | ||
1029 | 1128 | ||
1129 | unlock_policy_rwsem_read(cpu); | ||
1130 | |||
1131 | out_policy: | ||
1132 | cpufreq_cpu_put(policy); | ||
1133 | out: | ||
1030 | return (ret_freq); | 1134 | return (ret_freq); |
1031 | } | 1135 | } |
1032 | EXPORT_SYMBOL(cpufreq_get); | 1136 | EXPORT_SYMBOL(cpufreq_get); |
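
Both getters are exported, so a throwaway module like the hypothetical sketch below (freq_sample_init and its module boilerplate are invented for illustration) shows the difference: cpufreq_quick_get() only returns the cached policy->cur under the read lock, while cpufreq_get() additionally calls the driver's ->get() hook and may resync the cached value.

	/* Hypothetical test module, built only on the exported cpufreq API. */
	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/init.h>
	#include <linux/cpufreq.h>

	static int __init freq_sample_init(void)
	{
		unsigned int cached = cpufreq_quick_get(0); /* cached policy->cur */
		unsigned int fresh = cpufreq_get(0);        /* driver ->get(), rwsem-protected */

		printk(KERN_INFO "cpu0: cached %u kHz, driver reports %u kHz\n",
		       cached, fresh);
		return 0;
	}

	static void __exit freq_sample_exit(void)
	{
	}

	module_init(freq_sample_init);
	module_exit(freq_sample_exit);
	MODULE_LICENSE("GPL");
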
@@ -1278,7 +1382,6 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier); | |||
1278 | *********************************************************************/ | 1382 | *********************************************************************/ |
1279 | 1383 | ||
1280 | 1384 | ||
1281 | /* Must be called with lock_cpu_hotplug held */ | ||
1282 | int __cpufreq_driver_target(struct cpufreq_policy *policy, | 1385 | int __cpufreq_driver_target(struct cpufreq_policy *policy, |
1283 | unsigned int target_freq, | 1386 | unsigned int target_freq, |
1284 | unsigned int relation) | 1387 | unsigned int relation) |
@@ -1304,20 +1407,19 @@ int cpufreq_driver_target(struct cpufreq_policy *policy, | |||
1304 | if (!policy) | 1407 | if (!policy) |
1305 | return -EINVAL; | 1408 | return -EINVAL; |
1306 | 1409 | ||
1307 | lock_cpu_hotplug(); | 1410 | if (unlikely(lock_policy_rwsem_write(policy->cpu))) |
1308 | mutex_lock(&policy->lock); | 1411 | return -EINVAL; |
1309 | 1412 | ||
1310 | ret = __cpufreq_driver_target(policy, target_freq, relation); | 1413 | ret = __cpufreq_driver_target(policy, target_freq, relation); |
1311 | 1414 | ||
1312 | mutex_unlock(&policy->lock); | 1415 | unlock_policy_rwsem_write(policy->cpu); |
1313 | unlock_cpu_hotplug(); | ||
1314 | 1416 | ||
1315 | cpufreq_cpu_put(policy); | 1417 | cpufreq_cpu_put(policy); |
1316 | return ret; | 1418 | return ret; |
1317 | } | 1419 | } |
1318 | EXPORT_SYMBOL_GPL(cpufreq_driver_target); | 1420 | EXPORT_SYMBOL_GPL(cpufreq_driver_target); |
1319 | 1421 | ||
1320 | int cpufreq_driver_getavg(struct cpufreq_policy *policy) | 1422 | int __cpufreq_driver_getavg(struct cpufreq_policy *policy) |
1321 | { | 1423 | { |
1322 | int ret = 0; | 1424 | int ret = 0; |
1323 | 1425 | ||
@@ -1325,20 +1427,15 @@ int cpufreq_driver_getavg(struct cpufreq_policy *policy) | |||
1325 | if (!policy) | 1427 | if (!policy) |
1326 | return -EINVAL; | 1428 | return -EINVAL; |
1327 | 1429 | ||
1328 | mutex_lock(&policy->lock); | ||
1329 | |||
1330 | if (cpu_online(policy->cpu) && cpufreq_driver->getavg) | 1430 | if (cpu_online(policy->cpu) && cpufreq_driver->getavg) |
1331 | ret = cpufreq_driver->getavg(policy->cpu); | 1431 | ret = cpufreq_driver->getavg(policy->cpu); |
1332 | 1432 | ||
1333 | mutex_unlock(&policy->lock); | ||
1334 | |||
1335 | cpufreq_cpu_put(policy); | 1433 | cpufreq_cpu_put(policy); |
1336 | return ret; | 1434 | return ret; |
1337 | } | 1435 | } |
1338 | EXPORT_SYMBOL_GPL(cpufreq_driver_getavg); | 1436 | EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg); |
1339 | 1437 | ||
1340 | /* | 1438 | /* |
1341 | * Locking: Must be called with the lock_cpu_hotplug() lock held | ||
1342 | * when "event" is CPUFREQ_GOV_LIMITS | 1439 | * when "event" is CPUFREQ_GOV_LIMITS |
1343 | */ | 1440 | */ |
1344 | 1441 | ||
@@ -1420,9 +1517,7 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) | |||
1420 | if (!cpu_policy) | 1517 | if (!cpu_policy) |
1421 | return -EINVAL; | 1518 | return -EINVAL; |
1422 | 1519 | ||
1423 | mutex_lock(&cpu_policy->lock); | ||
1424 | memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy)); | 1520 | memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy)); |
1425 | mutex_unlock(&cpu_policy->lock); | ||
1426 | 1521 | ||
1427 | cpufreq_cpu_put(cpu_policy); | 1522 | cpufreq_cpu_put(cpu_policy); |
1428 | return 0; | 1523 | return 0; |
@@ -1433,7 +1528,6 @@ EXPORT_SYMBOL(cpufreq_get_policy); | |||
1433 | /* | 1528 | /* |
1434 | * data : current policy. | 1529 | * data : current policy. |
1435 | * policy : policy to be set. | 1530 | * policy : policy to be set. |
1436 | * Locking: Must be called with the lock_cpu_hotplug() lock held | ||
1437 | */ | 1531 | */ |
1438 | static int __cpufreq_set_policy(struct cpufreq_policy *data, | 1532 | static int __cpufreq_set_policy(struct cpufreq_policy *data, |
1439 | struct cpufreq_policy *policy) | 1533 | struct cpufreq_policy *policy) |
@@ -1539,10 +1633,9 @@ int cpufreq_set_policy(struct cpufreq_policy *policy) | |||
1539 | if (!data) | 1633 | if (!data) |
1540 | return -EINVAL; | 1634 | return -EINVAL; |
1541 | 1635 | ||
1542 | lock_cpu_hotplug(); | 1636 | if (unlikely(lock_policy_rwsem_write(policy->cpu))) |
1637 | return -EINVAL; | ||
1543 | 1638 | ||
1544 | /* lock this CPU */ | ||
1545 | mutex_lock(&data->lock); | ||
1546 | 1639 | ||
1547 | ret = __cpufreq_set_policy(data, policy); | 1640 | ret = __cpufreq_set_policy(data, policy); |
1548 | data->user_policy.min = data->min; | 1641 | data->user_policy.min = data->min; |
@@ -1550,9 +1643,8 @@ int cpufreq_set_policy(struct cpufreq_policy *policy) | |||
1550 | data->user_policy.policy = data->policy; | 1643 | data->user_policy.policy = data->policy; |
1551 | data->user_policy.governor = data->governor; | 1644 | data->user_policy.governor = data->governor; |
1552 | 1645 | ||
1553 | mutex_unlock(&data->lock); | 1646 | unlock_policy_rwsem_write(policy->cpu); |
1554 | 1647 | ||
1555 | unlock_cpu_hotplug(); | ||
1556 | cpufreq_cpu_put(data); | 1648 | cpufreq_cpu_put(data); |
1557 | 1649 | ||
1558 | return ret; | 1650 | return ret; |
@@ -1576,8 +1668,8 @@ int cpufreq_update_policy(unsigned int cpu) | |||
1576 | if (!data) | 1668 | if (!data) |
1577 | return -ENODEV; | 1669 | return -ENODEV; |
1578 | 1670 | ||
1579 | lock_cpu_hotplug(); | 1671 | if (unlikely(lock_policy_rwsem_write(cpu))) |
1580 | mutex_lock(&data->lock); | 1672 | return -EINVAL; |
1581 | 1673 | ||
1582 | dprintk("updating policy for CPU %u\n", cpu); | 1674 | dprintk("updating policy for CPU %u\n", cpu); |
1583 | memcpy(&policy, data, sizeof(struct cpufreq_policy)); | 1675 | memcpy(&policy, data, sizeof(struct cpufreq_policy)); |
@@ -1602,8 +1694,8 @@ int cpufreq_update_policy(unsigned int cpu) | |||
1602 | 1694 | ||
1603 | ret = __cpufreq_set_policy(data, &policy); | 1695 | ret = __cpufreq_set_policy(data, &policy); |
1604 | 1696 | ||
1605 | mutex_unlock(&data->lock); | 1697 | unlock_policy_rwsem_write(cpu); |
1606 | unlock_cpu_hotplug(); | 1698 | |
1607 | cpufreq_cpu_put(data); | 1699 | cpufreq_cpu_put(data); |
1608 | return ret; | 1700 | return ret; |
1609 | } | 1701 | } |
@@ -1613,31 +1705,28 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb, | |||
1613 | unsigned long action, void *hcpu) | 1705 | unsigned long action, void *hcpu) |
1614 | { | 1706 | { |
1615 | unsigned int cpu = (unsigned long)hcpu; | 1707 | unsigned int cpu = (unsigned long)hcpu; |
1616 | struct cpufreq_policy *policy; | ||
1617 | struct sys_device *sys_dev; | 1708 | struct sys_device *sys_dev; |
1709 | struct cpufreq_policy *policy; | ||
1618 | 1710 | ||
1619 | sys_dev = get_cpu_sysdev(cpu); | 1711 | sys_dev = get_cpu_sysdev(cpu); |
1620 | |||
1621 | if (sys_dev) { | 1712 | if (sys_dev) { |
1622 | switch (action) { | 1713 | switch (action) { |
1623 | case CPU_ONLINE: | 1714 | case CPU_ONLINE: |
1624 | cpufreq_add_dev(sys_dev); | 1715 | cpufreq_add_dev(sys_dev); |
1625 | break; | 1716 | break; |
1626 | case CPU_DOWN_PREPARE: | 1717 | case CPU_DOWN_PREPARE: |
1627 | /* | 1718 | if (unlikely(lock_policy_rwsem_write(cpu))) |
1628 | * We attempt to put this cpu in lowest frequency | 1719 | BUG(); |
1629 | * possible before going down. This will permit | 1720 | |
1630 | * hardware-managed P-State to switch other related | ||
1631 | * threads to min or higher speeds if possible. | ||
1632 | */ | ||
1633 | policy = cpufreq_cpu_data[cpu]; | 1721 | policy = cpufreq_cpu_data[cpu]; |
1634 | if (policy) { | 1722 | if (policy) { |
1635 | cpufreq_driver_target(policy, policy->min, | 1723 | __cpufreq_driver_target(policy, policy->min, |
1636 | CPUFREQ_RELATION_H); | 1724 | CPUFREQ_RELATION_H); |
1637 | } | 1725 | } |
1726 | __cpufreq_remove_dev(sys_dev); | ||
1638 | break; | 1727 | break; |
1639 | case CPU_DEAD: | 1728 | case CPU_DOWN_FAILED: |
1640 | cpufreq_remove_dev(sys_dev); | 1729 | cpufreq_add_dev(sys_dev); |
1641 | break; | 1730 | break; |
1642 | } | 1731 | } |
1643 | } | 1732 | } |
@@ -1751,3 +1840,16 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver) | |||
1751 | return 0; | 1840 | return 0; |
1752 | } | 1841 | } |
1753 | EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); | 1842 | EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); |
1843 | |||
1844 | static int __init cpufreq_core_init(void) | ||
1845 | { | ||
1846 | int cpu; | ||
1847 | |||
1848 | for_each_possible_cpu(cpu) { | ||
1849 | per_cpu(policy_cpu, cpu) = -1; | ||
1850 | init_rwsem(&per_cpu(cpu_policy_rwsem, cpu)); | ||
1851 | } | ||
1852 | return 0; | ||
1853 | } | ||
1854 | |||
1855 | core_initcall(cpufreq_core_init); | ||
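
One convention worth spelling out in the rewritten teardown path: cpufreq_remove_dev() acquires the policy rwsem in write mode, and __cpufreq_remove_dev() drops it on every exit path, which is also what lets the CPU_DOWN_PREPARE notifier call __cpufreq_remove_dev() with the lock already held. The fragment below is a generic, hypothetical restatement of that hand-off (example_teardown and __example_teardown are invented names), not code from the patch.

	/* Called with the policy rwsem held in write mode; releases it
	 * on every exit path, mirroring __cpufreq_remove_dev(). */
	static int __example_teardown(int cpu)
	{
		struct cpufreq_policy *data = cpufreq_cpu_get(cpu);

		if (!data) {
			unlock_policy_rwsem_write(cpu);
			return -EINVAL;
		}

		/* ... tear per-CPU state down here ... */

		cpufreq_cpu_put(data);
		unlock_policy_rwsem_write(cpu);
		return 0;
	}

	static int example_teardown(int cpu)
	{
		/* Mirrors cpufreq_remove_dev(): this path must not race with
		 * hotplug, so failing to take the lock is treated as a bug. */
		if (unlikely(lock_policy_rwsem_write(cpu)))
			BUG();

		return __example_teardown(cpu);
	}
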
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 05d6c22ba07c..26f440ccc3fb 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c | |||
@@ -429,14 +429,12 @@ static void dbs_check_cpu(int cpu) | |||
429 | static void do_dbs_timer(struct work_struct *work) | 429 | static void do_dbs_timer(struct work_struct *work) |
430 | { | 430 | { |
431 | int i; | 431 | int i; |
432 | lock_cpu_hotplug(); | ||
433 | mutex_lock(&dbs_mutex); | 432 | mutex_lock(&dbs_mutex); |
434 | for_each_online_cpu(i) | 433 | for_each_online_cpu(i) |
435 | dbs_check_cpu(i); | 434 | dbs_check_cpu(i); |
436 | schedule_delayed_work(&dbs_work, | 435 | schedule_delayed_work(&dbs_work, |
437 | usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); | 436 | usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); |
438 | mutex_unlock(&dbs_mutex); | 437 | mutex_unlock(&dbs_mutex); |
439 | unlock_cpu_hotplug(); | ||
440 | } | 438 | } |
441 | 439 | ||
442 | static inline void dbs_timer_init(void) | 440 | static inline void dbs_timer_init(void) |
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index f697449327c6..d60bcb9d14cc 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c | |||
@@ -52,19 +52,20 @@ static unsigned int def_sampling_rate; | |||
52 | static void do_dbs_timer(struct work_struct *work); | 52 | static void do_dbs_timer(struct work_struct *work); |
53 | 53 | ||
54 | /* Sampling types */ | 54 | /* Sampling types */ |
55 | enum dbs_sample {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; | 55 | enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; |
56 | 56 | ||
57 | struct cpu_dbs_info_s { | 57 | struct cpu_dbs_info_s { |
58 | cputime64_t prev_cpu_idle; | 58 | cputime64_t prev_cpu_idle; |
59 | cputime64_t prev_cpu_wall; | 59 | cputime64_t prev_cpu_wall; |
60 | struct cpufreq_policy *cur_policy; | 60 | struct cpufreq_policy *cur_policy; |
61 | struct delayed_work work; | 61 | struct delayed_work work; |
62 | enum dbs_sample sample_type; | ||
63 | unsigned int enable; | ||
64 | struct cpufreq_frequency_table *freq_table; | 62 | struct cpufreq_frequency_table *freq_table; |
65 | unsigned int freq_lo; | 63 | unsigned int freq_lo; |
66 | unsigned int freq_lo_jiffies; | 64 | unsigned int freq_lo_jiffies; |
67 | unsigned int freq_hi_jiffies; | 65 | unsigned int freq_hi_jiffies; |
66 | int cpu; | ||
67 | unsigned int enable:1, | ||
68 | sample_type:1; | ||
68 | }; | 69 | }; |
69 | static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); | 70 | static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); |
70 | 71 | ||
@@ -402,7 +403,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) | |||
402 | if (load < (dbs_tuners_ins.up_threshold - 10)) { | 403 | if (load < (dbs_tuners_ins.up_threshold - 10)) { |
403 | unsigned int freq_next, freq_cur; | 404 | unsigned int freq_next, freq_cur; |
404 | 405 | ||
405 | freq_cur = cpufreq_driver_getavg(policy); | 406 | freq_cur = __cpufreq_driver_getavg(policy); |
406 | if (!freq_cur) | 407 | if (!freq_cur) |
407 | freq_cur = policy->cur; | 408 | freq_cur = policy->cur; |
408 | 409 | ||
@@ -423,9 +424,11 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) | |||
423 | 424 | ||
424 | static void do_dbs_timer(struct work_struct *work) | 425 | static void do_dbs_timer(struct work_struct *work) |
425 | { | 426 | { |
426 | unsigned int cpu = smp_processor_id(); | 427 | struct cpu_dbs_info_s *dbs_info = |
427 | struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu); | 428 | container_of(work, struct cpu_dbs_info_s, work.work); |
428 | enum dbs_sample sample_type = dbs_info->sample_type; | 429 | unsigned int cpu = dbs_info->cpu; |
430 | int sample_type = dbs_info->sample_type; | ||
431 | |||
429 | /* We want all CPUs to do sampling nearly on same jiffy */ | 432 | /* We want all CPUs to do sampling nearly on same jiffy */ |
430 | int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); | 433 | int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); |
431 | 434 | ||
@@ -434,15 +437,19 @@ static void do_dbs_timer(struct work_struct *work) | |||
434 | 437 | ||
435 | delay -= jiffies % delay; | 438 | delay -= jiffies % delay; |
436 | 439 | ||
437 | if (!dbs_info->enable) | 440 | if (lock_policy_rwsem_write(cpu) < 0) |
441 | return; | ||
442 | |||
443 | if (!dbs_info->enable) { | ||
444 | unlock_policy_rwsem_write(cpu); | ||
438 | return; | 445 | return; |
446 | } | ||
447 | |||
439 | /* Common NORMAL_SAMPLE setup */ | 448 | /* Common NORMAL_SAMPLE setup */ |
440 | dbs_info->sample_type = DBS_NORMAL_SAMPLE; | 449 | dbs_info->sample_type = DBS_NORMAL_SAMPLE; |
441 | if (!dbs_tuners_ins.powersave_bias || | 450 | if (!dbs_tuners_ins.powersave_bias || |
442 | sample_type == DBS_NORMAL_SAMPLE) { | 451 | sample_type == DBS_NORMAL_SAMPLE) { |
443 | lock_cpu_hotplug(); | ||
444 | dbs_check_cpu(dbs_info); | 452 | dbs_check_cpu(dbs_info); |
445 | unlock_cpu_hotplug(); | ||
446 | if (dbs_info->freq_lo) { | 453 | if (dbs_info->freq_lo) { |
447 | /* Setup timer for SUB_SAMPLE */ | 454 | /* Setup timer for SUB_SAMPLE */ |
448 | dbs_info->sample_type = DBS_SUB_SAMPLE; | 455 | dbs_info->sample_type = DBS_SUB_SAMPLE; |
@@ -454,26 +461,27 @@ static void do_dbs_timer(struct work_struct *work) | |||
454 | CPUFREQ_RELATION_H); | 461 | CPUFREQ_RELATION_H); |
455 | } | 462 | } |
456 | queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay); | 463 | queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay); |
464 | unlock_policy_rwsem_write(cpu); | ||
457 | } | 465 | } |
458 | 466 | ||
459 | static inline void dbs_timer_init(unsigned int cpu) | 467 | static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) |
460 | { | 468 | { |
461 | struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu); | ||
462 | /* We want all CPUs to do sampling nearly on same jiffy */ | 469 | /* We want all CPUs to do sampling nearly on same jiffy */ |
463 | int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); | 470 | int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); |
464 | delay -= jiffies % delay; | 471 | delay -= jiffies % delay; |
465 | 472 | ||
473 | dbs_info->enable = 1; | ||
466 | ondemand_powersave_bias_init(); | 474 | ondemand_powersave_bias_init(); |
467 | INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer); | ||
468 | dbs_info->sample_type = DBS_NORMAL_SAMPLE; | 475 | dbs_info->sample_type = DBS_NORMAL_SAMPLE; |
469 | queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay); | 476 | INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer); |
477 | queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work, | ||
478 | delay); | ||
470 | } | 479 | } |
471 | 480 | ||
472 | static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) | 481 | static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) |
473 | { | 482 | { |
474 | dbs_info->enable = 0; | 483 | dbs_info->enable = 0; |
475 | cancel_delayed_work(&dbs_info->work); | 484 | cancel_delayed_work(&dbs_info->work); |
476 | flush_workqueue(kondemand_wq); | ||
477 | } | 485 | } |
478 | 486 | ||
479 | static int cpufreq_governor_dbs(struct cpufreq_policy *policy, | 487 | static int cpufreq_governor_dbs(struct cpufreq_policy *policy, |
@@ -502,21 +510,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
502 | 510 | ||
503 | mutex_lock(&dbs_mutex); | 511 | mutex_lock(&dbs_mutex); |
504 | dbs_enable++; | 512 | dbs_enable++; |
505 | if (dbs_enable == 1) { | ||
506 | kondemand_wq = create_workqueue("kondemand"); | ||
507 | if (!kondemand_wq) { | ||
508 | printk(KERN_ERR | ||
509 | "Creation of kondemand failed\n"); | ||
510 | dbs_enable--; | ||
511 | mutex_unlock(&dbs_mutex); | ||
512 | return -ENOSPC; | ||
513 | } | ||
514 | } | ||
515 | 513 | ||
516 | rc = sysfs_create_group(&policy->kobj, &dbs_attr_group); | 514 | rc = sysfs_create_group(&policy->kobj, &dbs_attr_group); |
517 | if (rc) { | 515 | if (rc) { |
518 | if (dbs_enable == 1) | ||
519 | destroy_workqueue(kondemand_wq); | ||
520 | dbs_enable--; | 516 | dbs_enable--; |
521 | mutex_unlock(&dbs_mutex); | 517 | mutex_unlock(&dbs_mutex); |
522 | return rc; | 518 | return rc; |
@@ -530,7 +526,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
530 | j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j); | 526 | j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j); |
531 | j_dbs_info->prev_cpu_wall = get_jiffies_64(); | 527 | j_dbs_info->prev_cpu_wall = get_jiffies_64(); |
532 | } | 528 | } |
533 | this_dbs_info->enable = 1; | 529 | this_dbs_info->cpu = cpu; |
534 | /* | 530 | /* |
535 | * Start the timerschedule work, when this governor | 531 | * Start the timerschedule work, when this governor |
536 | * is used for first time | 532 | * is used for first time |
@@ -550,7 +546,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
550 | 546 | ||
551 | dbs_tuners_ins.sampling_rate = def_sampling_rate; | 547 | dbs_tuners_ins.sampling_rate = def_sampling_rate; |
552 | } | 548 | } |
553 | dbs_timer_init(policy->cpu); | 549 | dbs_timer_init(this_dbs_info); |
554 | 550 | ||
555 | mutex_unlock(&dbs_mutex); | 551 | mutex_unlock(&dbs_mutex); |
556 | break; | 552 | break; |
@@ -560,9 +556,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
560 | dbs_timer_exit(this_dbs_info); | 556 | dbs_timer_exit(this_dbs_info); |
561 | sysfs_remove_group(&policy->kobj, &dbs_attr_group); | 557 | sysfs_remove_group(&policy->kobj, &dbs_attr_group); |
562 | dbs_enable--; | 558 | dbs_enable--; |
563 | if (dbs_enable == 0) | ||
564 | destroy_workqueue(kondemand_wq); | ||
565 | |||
566 | mutex_unlock(&dbs_mutex); | 559 | mutex_unlock(&dbs_mutex); |
567 | 560 | ||
568 | break; | 561 | break; |
@@ -591,12 +584,18 @@ static struct cpufreq_governor cpufreq_gov_dbs = { | |||
591 | 584 | ||
592 | static int __init cpufreq_gov_dbs_init(void) | 585 | static int __init cpufreq_gov_dbs_init(void) |
593 | { | 586 | { |
587 | kondemand_wq = create_workqueue("kondemand"); | ||
588 | if (!kondemand_wq) { | ||
589 | printk(KERN_ERR "Creation of kondemand failed\n"); | ||
590 | return -EFAULT; | ||
591 | } | ||
594 | return cpufreq_register_governor(&cpufreq_gov_dbs); | 592 | return cpufreq_register_governor(&cpufreq_gov_dbs); |
595 | } | 593 | } |
596 | 594 | ||
597 | static void __exit cpufreq_gov_dbs_exit(void) | 595 | static void __exit cpufreq_gov_dbs_exit(void) |
598 | { | 596 | { |
599 | cpufreq_unregister_governor(&cpufreq_gov_dbs); | 597 | cpufreq_unregister_governor(&cpufreq_gov_dbs); |
598 | destroy_workqueue(kondemand_wq); | ||
600 | } | 599 | } |
601 | 600 | ||
602 | 601 | ||
@@ -608,3 +607,4 @@ MODULE_LICENSE("GPL"); | |||
608 | 607 | ||
609 | module_init(cpufreq_gov_dbs_init); | 608 | module_init(cpufreq_gov_dbs_init); |
610 | module_exit(cpufreq_gov_dbs_exit); | 609 | module_exit(cpufreq_gov_dbs_exit); |
610 | |||
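
The ondemand change above replaces smp_processor_id() in the timer with container_of() on the work item, so the per-CPU state travels with the work itself, and dbs_timer_init() arms the work on dbs_info->cpu explicitly. A stripped-down, hypothetical version of that pattern follows (sample_info, sample_fn, sample_start and sample_wq are invented names; workqueue creation, error handling and teardown are omitted).

	#include <linux/kernel.h>
	#include <linux/workqueue.h>
	#include <linux/percpu.h>

	struct sample_info {
		struct delayed_work work;
		int cpu;
		unsigned int enable:1;
	};

	static DEFINE_PER_CPU(struct sample_info, sample_info);
	static struct workqueue_struct *sample_wq;	/* created once at module init */

	static void sample_fn(struct work_struct *work)
	{
		/* Recover the per-CPU container from the embedded work item
		 * instead of relying on smp_processor_id(). */
		struct sample_info *info =
			container_of(work, struct sample_info, work.work);

		if (!info->enable)
			return;

		/* ... sample CPU info->cpu here ... */

		queue_delayed_work_on(info->cpu, sample_wq, &info->work, HZ);
	}

	static void sample_start(int cpu)
	{
		struct sample_info *info = &per_cpu(sample_info, cpu);

		info->cpu = cpu;
		info->enable = 1;
		INIT_DELAYED_WORK(&info->work, sample_fn);
		queue_delayed_work_on(cpu, sample_wq, &info->work, HZ);
	}
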
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index 91ad342a6051..d1c7cac9316c 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c | |||
@@ -370,12 +370,10 @@ __exit cpufreq_stats_exit(void) | |||
370 | cpufreq_unregister_notifier(¬ifier_trans_block, | 370 | cpufreq_unregister_notifier(¬ifier_trans_block, |
371 | CPUFREQ_TRANSITION_NOTIFIER); | 371 | CPUFREQ_TRANSITION_NOTIFIER); |
372 | unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier); | 372 | unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier); |
373 | lock_cpu_hotplug(); | ||
374 | for_each_online_cpu(cpu) { | 373 | for_each_online_cpu(cpu) { |
375 | cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, | 374 | cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, |
376 | CPU_DEAD, (void *)(long)cpu); | 375 | CPU_DEAD, (void *)(long)cpu); |
377 | } | 376 | } |
378 | unlock_cpu_hotplug(); | ||
379 | } | 377 | } |
380 | 378 | ||
381 | MODULE_AUTHOR ("Zou Nan hai <nanhai.zou@intel.com>"); | 379 | MODULE_AUTHOR ("Zou Nan hai <nanhai.zou@intel.com>"); |
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c index 2a4eb0bfaf30..860345c7799a 100644 --- a/drivers/cpufreq/cpufreq_userspace.c +++ b/drivers/cpufreq/cpufreq_userspace.c | |||
@@ -71,7 +71,6 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy) | |||
71 | 71 | ||
72 | dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); | 72 | dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); |
73 | 73 | ||
74 | lock_cpu_hotplug(); | ||
75 | mutex_lock(&userspace_mutex); | 74 | mutex_lock(&userspace_mutex); |
76 | if (!cpu_is_managed[policy->cpu]) | 75 | if (!cpu_is_managed[policy->cpu]) |
77 | goto err; | 76 | goto err; |
@@ -94,7 +93,6 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy) | |||
94 | 93 | ||
95 | err: | 94 | err: |
96 | mutex_unlock(&userspace_mutex); | 95 | mutex_unlock(&userspace_mutex); |
97 | unlock_cpu_hotplug(); | ||
98 | return ret; | 96 | return ret; |
99 | } | 97 | } |
100 | 98 | ||
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 7f008f6bfdc3..0899e2cdcdd1 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h | |||
@@ -84,9 +84,6 @@ struct cpufreq_policy { | |||
84 | unsigned int policy; /* see above */ | 84 | unsigned int policy; /* see above */ |
85 | struct cpufreq_governor *governor; /* see below */ | 85 | struct cpufreq_governor *governor; /* see below */ |
86 | 86 | ||
87 | struct mutex lock; /* CPU ->setpolicy or ->target may | ||
88 | only be called once a time */ | ||
89 | |||
90 | struct work_struct update; /* if update_policy() needs to be | 87 | struct work_struct update; /* if update_policy() needs to be |
91 | * called, but you're in IRQ context */ | 88 | * called, but you're in IRQ context */ |
92 | 89 | ||
@@ -172,11 +169,16 @@ extern int __cpufreq_driver_target(struct cpufreq_policy *policy, | |||
172 | unsigned int relation); | 169 | unsigned int relation); |
173 | 170 | ||
174 | 171 | ||
175 | extern int cpufreq_driver_getavg(struct cpufreq_policy *policy); | 172 | extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy); |
176 | 173 | ||
177 | int cpufreq_register_governor(struct cpufreq_governor *governor); | 174 | int cpufreq_register_governor(struct cpufreq_governor *governor); |
178 | void cpufreq_unregister_governor(struct cpufreq_governor *governor); | 175 | void cpufreq_unregister_governor(struct cpufreq_governor *governor); |
179 | 176 | ||
177 | int lock_policy_rwsem_read(int cpu); | ||
178 | int lock_policy_rwsem_write(int cpu); | ||
179 | void unlock_policy_rwsem_read(int cpu); | ||
180 | void unlock_policy_rwsem_write(int cpu); | ||
181 | |||
180 | 182 | ||
181 | /********************************************************************* | 183 | /********************************************************************* |
182 | * CPUFREQ DRIVER INTERFACE * | 184 | * CPUFREQ DRIVER INTERFACE * |
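
Since lock_policy_rwsem_read/write and their unlock counterparts are now declared here and exported with EXPORT_SYMBOL_GPL, code outside cpufreq.c can in principle serialize against policy rewrites and CPU removal with the same semaphore the core uses. A hypothetical fragment (gov_read_cur is an invented name), subject to the rule above that routines reachable from the hotplug notifier path must not take the semaphore themselves:

	#include <linux/cpufreq.h>

	/* Hypothetical helper in a governor: read policy->cur under the
	 * per-policy rwsem; 0 is returned if the CPU went offline. */
	static unsigned int gov_read_cur(struct cpufreq_policy *policy)
	{
		unsigned int cur;

		if (lock_policy_rwsem_read(policy->cpu) < 0)
			return 0;

		cur = policy->cur;
		unlock_policy_rwsem_read(policy->cpu);

		return cur;
	}
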