author | Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> | 2006-10-03 15:29:15 -0400
committer | Dave Jones <davej@redhat.com> | 2006-10-15 19:57:10 -0400
commit | fe27cb358835cfa525b5831ec8ddb9b9bfda3c73 (patch)
tree | 9681e706da7b213253c69881edaca1665f9d266a
parent | 519ce3ec76bf5c068e575800a9977659f7cccec4 (diff)
[CPUFREQ][2/8] acpi: reorganize code to make MSR support addition easier
Some cleanup and redesign of the driver, mainly to make it easier to add
support for multiple sub-mechanisms of changing the frequency. Currently
this driver supports only the ACPI SYSTEM_IO address space. With the
changes below it becomes easier to add support for other address spaces
such as Intel Enhanced SpeedStep, which uses MSRs (ACPI
FIXED_FEATURE_HARDWARE) to perform the transitions.
Signed-off-by: Denis Sadykov <denis.m.sadykov@intel.com>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
Signed-off-by: Dave Jones <davej@redhat.com>
-rw-r--r-- | arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c | 467
1 file changed, 227 insertions, 240 deletions
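
For orientation before reading the diff: the patch funnels every P-state
access through a small command structure, so the only SYSTEM_IO-specific
code left is the rdport()/wrport() pair called from do_drv_read() and
do_drv_write(). A condensed excerpt of the new shape (taken from the new
code in the diff below, with comments added here):

	struct io_addr {
		u16 port;		/* I/O port taken from the ACPI _PCT registers */
		u8 bit_width;
	};

	struct drv_cmd {
		cpumask_t mask;		/* CPUs the access has to run on */
		struct io_addr addr;
		u32 val;		/* control value to write, or status value read back */
	};

	static void do_drv_write(struct drv_cmd *cmd)
	{
		wrport(cmd->addr.port, cmd->addr.bit_width, cmd->val);
	}

	static void drv_write(struct drv_cmd *cmd)
	{
		cpumask_t saved_mask = current->cpus_allowed;
		unsigned int i;

		/* bounce the current task onto each CPU in the mask and
		 * issue the port write there */
		for_each_cpu_mask(i, cmd->mask) {
			set_cpus_allowed(current, cpumask_of_cpu(i));
			do_drv_write(cmd);
		}
		set_cpus_allowed(current, saved_mask);
	}

acpi_cpufreq_target() then only has to fill in a drv_cmd from the frequency
table and hand it to drv_write(); see the "@@ -232,85 +242,53 @@" hunk.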
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index e902d970226f..ebc9fe285748 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -1,9 +1,10 @@
1 | /* | 1 | /* |
2 | * acpi-cpufreq.c - ACPI Processor P-States Driver ($Revision: 1.3 $) | 2 | * acpi-cpufreq.c - ACPI Processor P-States Driver ($Revision: 1.4 $) |
3 | * | 3 | * |
4 | * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> | 4 | * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> |
5 | * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> | 5 | * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> |
6 | * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de> | 6 | * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de> |
7 | * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com> | ||
7 | * | 8 | * |
8 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 9 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
9 | * | 10 | * |
@@ -27,19 +28,22 @@
27 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
28 | #include <linux/module.h> | 29 | #include <linux/module.h> |
29 | #include <linux/init.h> | 30 | #include <linux/init.h> |
31 | #include <linux/smp.h> | ||
32 | #include <linux/sched.h> | ||
30 | #include <linux/cpufreq.h> | 33 | #include <linux/cpufreq.h> |
31 | #include <linux/proc_fs.h> | ||
32 | #include <linux/seq_file.h> | ||
33 | #include <linux/compiler.h> | 34 | #include <linux/compiler.h> |
34 | #include <linux/sched.h> /* current */ | 35 | #include <linux/sched.h> /* current */ |
35 | #include <linux/dmi.h> | 36 | #include <linux/dmi.h> |
36 | #include <asm/io.h> | ||
37 | #include <asm/delay.h> | ||
38 | #include <asm/uaccess.h> | ||
39 | 37 | ||
40 | #include <linux/acpi.h> | 38 | #include <linux/acpi.h> |
41 | #include <acpi/processor.h> | 39 | #include <acpi/processor.h> |
42 | 40 | ||
41 | #include <asm/io.h> | ||
42 | #include <asm/processor.h> | ||
43 | #include <asm/cpufeature.h> | ||
44 | #include <asm/delay.h> | ||
45 | #include <asm/uaccess.h> | ||
46 | |||
43 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg) | 47 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg) |
44 | 48 | ||
45 | MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski"); | 49 | MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski"); |
@@ -47,24 +51,35 @@ MODULE_DESCRIPTION("ACPI Processor P-States Driver");
47 | MODULE_LICENSE("GPL"); | 51 | MODULE_LICENSE("GPL"); |
48 | 52 | ||
49 | 53 | ||
50 | struct cpufreq_acpi_io { | 54 | struct acpi_cpufreq_data { |
51 | struct acpi_processor_performance *acpi_data; | 55 | struct acpi_processor_performance *acpi_data; |
52 | struct cpufreq_frequency_table *freq_table; | 56 | struct cpufreq_frequency_table *freq_table; |
53 | unsigned int resume; | 57 | unsigned int resume; |
54 | }; | 58 | }; |
55 | 59 | ||
56 | static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS]; | 60 | static struct acpi_cpufreq_data *drv_data[NR_CPUS]; |
57 | static struct acpi_processor_performance *acpi_perf_data[NR_CPUS]; | 61 | static struct acpi_processor_performance *acpi_perf_data[NR_CPUS]; |
58 | 62 | ||
59 | static struct cpufreq_driver acpi_cpufreq_driver; | 63 | static struct cpufreq_driver acpi_cpufreq_driver; |
60 | 64 | ||
61 | static unsigned int acpi_pstate_strict; | 65 | static unsigned int acpi_pstate_strict; |
62 | 66 | ||
63 | static int | 67 | static unsigned extract_freq(u32 value, struct acpi_cpufreq_data *data) |
64 | acpi_processor_write_port( | 68 | { |
65 | u16 port, | 69 | struct acpi_processor_performance *perf; |
66 | u8 bit_width, | 70 | int i; |
67 | u32 value) | 71 | |
72 | perf = data->acpi_data; | ||
73 | |||
74 | for (i = 0; i < perf->state_count; i++) { | ||
75 | if (value == perf->states[i].status) | ||
76 | return data->freq_table[i].frequency; | ||
77 | } | ||
78 | return 0; | ||
79 | } | ||
80 | |||
81 | |||
82 | static void wrport(u16 port, u8 bit_width, u32 value) | ||
68 | { | 83 | { |
69 | if (bit_width <= 8) { | 84 | if (bit_width <= 8) { |
70 | outb(value, port); | 85 | outb(value, port); |
@@ -72,17 +87,10 @@ acpi_processor_write_port(
72 | outw(value, port); | 87 | outw(value, port); |
73 | } else if (bit_width <= 32) { | 88 | } else if (bit_width <= 32) { |
74 | outl(value, port); | 89 | outl(value, port); |
75 | } else { | ||
76 | return -ENODEV; | ||
77 | } | 90 | } |
78 | return 0; | ||
79 | } | 91 | } |
80 | 92 | ||
81 | static int | 93 | static void rdport(u16 port, u8 bit_width, u32 *ret) |
82 | acpi_processor_read_port( | ||
83 | u16 port, | ||
84 | u8 bit_width, | ||
85 | u32 *ret) | ||
86 | { | 94 | { |
87 | *ret = 0; | 95 | *ret = 0; |
88 | if (bit_width <= 8) { | 96 | if (bit_width <= 8) { |
@@ -91,139 +99,141 @@ acpi_processor_read_port(
91 | *ret = inw(port); | 99 | *ret = inw(port); |
92 | } else if (bit_width <= 32) { | 100 | } else if (bit_width <= 32) { |
93 | *ret = inl(port); | 101 | *ret = inl(port); |
94 | } else { | ||
95 | return -ENODEV; | ||
96 | } | 102 | } |
97 | return 0; | ||
98 | } | 103 | } |
99 | 104 | ||
100 | static int | 105 | struct io_addr { |
101 | acpi_processor_set_performance ( | 106 | u16 port; |
102 | struct cpufreq_acpi_io *data, | 107 | u8 bit_width; |
103 | unsigned int cpu, | 108 | }; |
104 | int state) | 109 | |
110 | struct drv_cmd { | ||
111 | cpumask_t mask; | ||
112 | struct io_addr addr; | ||
113 | u32 val; | ||
114 | }; | ||
115 | |||
116 | static void do_drv_read(struct drv_cmd *cmd) | ||
105 | { | 117 | { |
106 | u16 port = 0; | 118 | rdport(cmd->addr.port, cmd->addr.bit_width, &cmd->val); |
107 | u8 bit_width = 0; | 119 | return; |
108 | int i = 0; | 120 | } |
109 | int ret = 0; | ||
110 | u32 value = 0; | ||
111 | int retval; | ||
112 | struct acpi_processor_performance *perf; | ||
113 | 121 | ||
114 | dprintk("acpi_processor_set_performance\n"); | 122 | static void do_drv_write(struct drv_cmd *cmd) |
123 | { | ||
124 | wrport(cmd->addr.port, cmd->addr.bit_width, cmd->val); | ||
125 | return; | ||
126 | } | ||
115 | 127 | ||
116 | retval = 0; | 128 | static inline void drv_read(struct drv_cmd *cmd) |
117 | perf = data->acpi_data; | 129 | { |
118 | if (state == perf->state) { | 130 | cpumask_t saved_mask = current->cpus_allowed; |
119 | if (unlikely(data->resume)) { | 131 | cmd->val = 0; |
120 | dprintk("Called after resume, resetting to P%d\n", state); | 132 | |
121 | data->resume = 0; | 133 | set_cpus_allowed(current, cmd->mask); |
122 | } else { | 134 | do_drv_read(cmd); |
123 | dprintk("Already at target state (P%d)\n", state); | 135 | set_cpus_allowed(current, saved_mask); |
124 | return (retval); | 136 | |
125 | } | 137 | } |
138 | |||
139 | static void drv_write(struct drv_cmd *cmd) | ||
140 | { | ||
141 | cpumask_t saved_mask = current->cpus_allowed; | ||
142 | unsigned int i; | ||
143 | |||
144 | for_each_cpu_mask(i, cmd->mask) { | ||
145 | set_cpus_allowed(current, cpumask_of_cpu(i)); | ||
146 | do_drv_write(cmd); | ||
126 | } | 147 | } |
127 | 148 | ||
128 | dprintk("Transitioning from P%d to P%d\n", perf->state, state); | 149 | set_cpus_allowed(current, saved_mask); |
150 | return; | ||
151 | } | ||
129 | 152 | ||
130 | /* | 153 | static u32 get_cur_val(cpumask_t mask) |
131 | * First we write the target state's 'control' value to the | 154 | { |
132 | * control_register. | 155 | struct acpi_processor_performance *perf; |
133 | */ | 156 | struct drv_cmd cmd; |
134 | 157 | ||
135 | port = perf->control_register.address; | 158 | if (unlikely(cpus_empty(mask))) |
136 | bit_width = perf->control_register.bit_width; | 159 | return 0; |
137 | value = (u32) perf->states[state].control; | ||
138 | 160 | ||
139 | dprintk("Writing 0x%08x to port 0x%04x\n", value, port); | 161 | perf = drv_data[first_cpu(mask)]->acpi_data; |
162 | cmd.addr.port = perf->control_register.address; | ||
163 | cmd.addr.bit_width = perf->control_register.bit_width; | ||
164 | cmd.mask = mask; | ||
140 | 165 | ||
141 | ret = acpi_processor_write_port(port, bit_width, value); | 166 | drv_read(&cmd); |
142 | if (ret) { | ||
143 | dprintk("Invalid port width 0x%04x\n", bit_width); | ||
144 | return (ret); | ||
145 | } | ||
146 | 167 | ||
147 | /* | 168 | dprintk("get_cur_val = %u\n", cmd.val); |
148 | * Assume the write went through when acpi_pstate_strict is not used. | 169 | |
149 | * As read status_register is an expensive operation and there | 170 | return cmd.val; |
150 | * are no specific error cases where an IO port write will fail. | 171 | } |
151 | */ | ||
152 | if (acpi_pstate_strict) { | ||
153 | /* Then we read the 'status_register' and compare the value | ||
154 | * with the target state's 'status' to make sure the | ||
155 | * transition was successful. | ||
156 | * Note that we'll poll for up to 1ms (100 cycles of 10us) | ||
157 | * before giving up. | ||
158 | */ | ||
159 | |||
160 | port = perf->status_register.address; | ||
161 | bit_width = perf->status_register.bit_width; | ||
162 | |||
163 | dprintk("Looking for 0x%08x from port 0x%04x\n", | ||
164 | (u32) perf->states[state].status, port); | ||
165 | |||
166 | for (i = 0; i < 100; i++) { | ||
167 | ret = acpi_processor_read_port(port, bit_width, &value); | ||
168 | if (ret) { | ||
169 | dprintk("Invalid port width 0x%04x\n", bit_width); | ||
170 | return (ret); | ||
171 | } | ||
172 | if (value == (u32) perf->states[state].status) | ||
173 | break; | ||
174 | udelay(10); | ||
175 | } | ||
176 | } else { | ||
177 | value = (u32) perf->states[state].status; | ||
178 | } | ||
179 | 172 | ||
180 | if (unlikely(value != (u32) perf->states[state].status)) { | 173 | static unsigned int get_cur_freq_on_cpu(unsigned int cpu) |
181 | printk(KERN_WARNING "acpi-cpufreq: Transition failed\n"); | 174 | { |
182 | retval = -ENODEV; | 175 | struct acpi_cpufreq_data *data = drv_data[cpu]; |
183 | return (retval); | 176 | unsigned int freq; |
177 | |||
178 | dprintk("get_cur_freq_on_cpu (%d)\n", cpu); | ||
179 | |||
180 | if (unlikely(data == NULL || | ||
181 | data->acpi_data == NULL || | ||
182 | data->freq_table == NULL)) { | ||
183 | return 0; | ||
184 | } | 184 | } |
185 | 185 | ||
186 | dprintk("Transition successful after %d microseconds\n", i * 10); | 186 | freq = extract_freq(get_cur_val(cpumask_of_cpu(cpu)), data); |
187 | dprintk("cur freq = %u\n", freq); | ||
187 | 188 | ||
188 | perf->state = state; | 189 | return freq; |
189 | return (retval); | ||
190 | } | 190 | } |
191 | 191 | ||
192 | static unsigned int check_freqs(cpumask_t mask, unsigned int freq, | ||
193 | struct acpi_cpufreq_data *data) | ||
194 | { | ||
195 | unsigned int cur_freq; | ||
196 | unsigned int i; | ||
192 | 197 | ||
193 | static int | 198 | for (i = 0; i < 100; i++) { |
194 | acpi_cpufreq_target ( | 199 | cur_freq = extract_freq(get_cur_val(mask), data); |
195 | struct cpufreq_policy *policy, | 200 | if (cur_freq == freq) |
196 | unsigned int target_freq, | 201 | return 1; |
197 | unsigned int relation) | 202 | udelay(10); |
203 | } | ||
204 | return 0; | ||
205 | } | ||
206 | |||
207 | static int acpi_cpufreq_target(struct cpufreq_policy *policy, | ||
208 | unsigned int target_freq, | ||
209 | unsigned int relation) | ||
198 | { | 210 | { |
199 | struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; | 211 | struct acpi_cpufreq_data *data = drv_data[policy->cpu]; |
200 | struct cpufreq_acpi_io *cpudata; | 212 | struct acpi_processor_performance *perf; |
201 | struct acpi_processor_performance *perf; | 213 | struct cpufreq_freqs freqs; |
202 | struct cpufreq_freqs freqs; | 214 | cpumask_t online_policy_cpus; |
203 | cpumask_t online_policy_cpus; | 215 | struct drv_cmd cmd; |
204 | cpumask_t saved_mask; | 216 | unsigned int next_state = 0; |
205 | cpumask_t set_mask; | 217 | unsigned int next_perf_state = 0; |
206 | cpumask_t covered_cpus; | 218 | unsigned int i; |
207 | unsigned int cur_state = 0; | 219 | int result = 0; |
208 | unsigned int next_state = 0; | 220 | |
209 | unsigned int result = 0; | 221 | dprintk("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu); |
210 | unsigned int j; | 222 | |
211 | unsigned int tmp; | 223 | if (unlikely(data == NULL || |
212 | 224 | data->acpi_data == NULL || | |
213 | dprintk("acpi_cpufreq_setpolicy\n"); | 225 | data->freq_table == NULL)) { |
226 | return -ENODEV; | ||
227 | } | ||
214 | 228 | ||
229 | perf = data->acpi_data; | ||
215 | result = cpufreq_frequency_table_target(policy, | 230 | result = cpufreq_frequency_table_target(policy, |
216 | data->freq_table, | 231 | data->freq_table, |
217 | target_freq, | 232 | target_freq, |
218 | relation, | 233 | relation, |
219 | &next_state); | 234 | &next_state); |
220 | if (unlikely(result)) | 235 | if (unlikely(result)) |
221 | return (result); | 236 | return -ENODEV; |
222 | |||
223 | perf = data->acpi_data; | ||
224 | cur_state = perf->state; | ||
225 | freqs.old = data->freq_table[cur_state].frequency; | ||
226 | freqs.new = data->freq_table[next_state].frequency; | ||
227 | 237 | ||
228 | #ifdef CONFIG_HOTPLUG_CPU | 238 | #ifdef CONFIG_HOTPLUG_CPU |
229 | /* cpufreq holds the hotplug lock, so we are safe from here on */ | 239 | /* cpufreq holds the hotplug lock, so we are safe from here on */ |
@@ -232,85 +242,53 @@ acpi_cpufreq_target (
232 | online_policy_cpus = policy->cpus; | 242 | online_policy_cpus = policy->cpus; |
233 | #endif | 243 | #endif |
234 | 244 | ||
235 | for_each_cpu_mask(j, online_policy_cpus) { | 245 | cmd.val = get_cur_val(online_policy_cpus); |
236 | freqs.cpu = j; | 246 | freqs.old = extract_freq(cmd.val, data); |
237 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | 247 | freqs.new = data->freq_table[next_state].frequency; |
248 | next_perf_state = data->freq_table[next_state].index; | ||
249 | if (freqs.new == freqs.old) { | ||
250 | if (unlikely(data->resume)) { | ||
251 | dprintk("Called after resume, resetting to P%d\n", next_perf_state); | ||
252 | data->resume = 0; | ||
253 | } else { | ||
254 | dprintk("Already at target state (P%d)\n", next_perf_state); | ||
255 | return 0; | ||
256 | } | ||
238 | } | 257 | } |
239 | 258 | ||
240 | /* | 259 | cmd.addr.port = perf->control_register.address; |
241 | * We need to call driver->target() on all or any CPU in | 260 | cmd.addr.bit_width = perf->control_register.bit_width; |
242 | * policy->cpus, depending on policy->shared_type. | 261 | cmd.val = (u32) perf->states[next_perf_state].control; |
243 | */ | ||
244 | saved_mask = current->cpus_allowed; | ||
245 | cpus_clear(covered_cpus); | ||
246 | for_each_cpu_mask(j, online_policy_cpus) { | ||
247 | /* | ||
248 | * Support for SMP systems. | ||
249 | * Make sure we are running on CPU that wants to change freq | ||
250 | */ | ||
251 | cpus_clear(set_mask); | ||
252 | if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) | ||
253 | cpus_or(set_mask, set_mask, online_policy_cpus); | ||
254 | else | ||
255 | cpu_set(j, set_mask); | ||
256 | |||
257 | set_cpus_allowed(current, set_mask); | ||
258 | if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) { | ||
259 | dprintk("couldn't limit to CPUs in this domain\n"); | ||
260 | result = -EAGAIN; | ||
261 | break; | ||
262 | } | ||
263 | 262 | ||
264 | cpudata = acpi_io_data[j]; | 263 | cpus_clear(cmd.mask); |
265 | result = acpi_processor_set_performance(cpudata, j, next_state); | ||
266 | if (result) { | ||
267 | result = -EAGAIN; | ||
268 | break; | ||
269 | } | ||
270 | 264 | ||
271 | if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) | 265 | if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY) |
272 | break; | 266 | cmd.mask = online_policy_cpus; |
273 | 267 | else | |
274 | cpu_set(j, covered_cpus); | 268 | cpu_set(policy->cpu, cmd.mask); |
275 | } | ||
276 | 269 | ||
277 | for_each_cpu_mask(j, online_policy_cpus) { | 270 | for_each_cpu_mask(i, cmd.mask) { |
278 | freqs.cpu = j; | 271 | freqs.cpu = i; |
279 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 272 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); |
280 | } | 273 | } |
281 | 274 | ||
282 | if (unlikely(result)) { | 275 | drv_write(&cmd); |
283 | /* | ||
284 | * We have failed halfway through the frequency change. | ||
285 | * We have sent callbacks to online_policy_cpus and | ||
286 | * acpi_processor_set_performance() has been called on | ||
287 | * coverd_cpus. Best effort undo.. | ||
288 | */ | ||
289 | |||
290 | if (!cpus_empty(covered_cpus)) { | ||
291 | for_each_cpu_mask(j, covered_cpus) { | ||
292 | cpus_clear(set_mask); | ||
293 | cpu_set(j, set_mask); | ||
294 | set_cpus_allowed(current, set_mask); | ||
295 | cpudata = acpi_io_data[j]; | ||
296 | acpi_processor_set_performance(cpudata, | ||
297 | j, | ||
298 | cur_state); | ||
299 | } | ||
300 | } | ||
301 | 276 | ||
302 | tmp = freqs.new; | 277 | if (acpi_pstate_strict) { |
303 | freqs.new = freqs.old; | 278 | if (!check_freqs(cmd.mask, freqs.new, data)) { |
304 | freqs.old = tmp; | 279 | dprintk("acpi_cpufreq_target failed (%d)\n", |
305 | for_each_cpu_mask(j, online_policy_cpus) { | 280 | policy->cpu); |
306 | freqs.cpu = j; | 281 | return -EAGAIN; |
307 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
308 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
309 | } | 282 | } |
310 | } | 283 | } |
311 | 284 | ||
312 | set_cpus_allowed(current, saved_mask); | 285 | for_each_cpu_mask(i, cmd.mask) { |
313 | return (result); | 286 | freqs.cpu = i; |
287 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
288 | } | ||
289 | perf->state = next_perf_state; | ||
290 | |||
291 | return result; | ||
314 | } | 292 | } |
315 | 293 | ||
316 | 294 | ||
@@ -318,21 +296,17 @@ static int
318 | acpi_cpufreq_verify ( | 296 | acpi_cpufreq_verify ( |
319 | struct cpufreq_policy *policy) | 297 | struct cpufreq_policy *policy) |
320 | { | 298 | { |
321 | unsigned int result = 0; | 299 | struct acpi_cpufreq_data *data = drv_data[policy->cpu]; |
322 | struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; | ||
323 | 300 | ||
324 | dprintk("acpi_cpufreq_verify\n"); | 301 | dprintk("acpi_cpufreq_verify\n"); |
325 | 302 | ||
326 | result = cpufreq_frequency_table_verify(policy, | 303 | return cpufreq_frequency_table_verify(policy, data->freq_table); |
327 | data->freq_table); | ||
328 | |||
329 | return (result); | ||
330 | } | 304 | } |
331 | 305 | ||
332 | 306 | ||
333 | static unsigned long | 307 | static unsigned long |
334 | acpi_cpufreq_guess_freq ( | 308 | acpi_cpufreq_guess_freq ( |
335 | struct cpufreq_acpi_io *data, | 309 | struct acpi_cpufreq_data *data, |
336 | unsigned int cpu) | 310 | unsigned int cpu) |
337 | { | 311 | { |
338 | struct acpi_processor_performance *perf = data->acpi_data; | 312 | struct acpi_processor_performance *perf = data->acpi_data; |
@@ -369,9 +343,10 @@ acpi_cpufreq_guess_freq (
369 | * do _PDC and _PSD and find out the processor dependency for the | 343 | * do _PDC and _PSD and find out the processor dependency for the |
370 | * actual init that will happen later... | 344 | * actual init that will happen later... |
371 | */ | 345 | */ |
372 | static int acpi_cpufreq_early_init_acpi(void) | 346 | static int acpi_cpufreq_early_init(void) |
373 | { | 347 | { |
374 | struct acpi_processor_performance *data; | 348 | struct acpi_processor_performance *data; |
349 | cpumask_t covered; | ||
375 | unsigned int i, j; | 350 | unsigned int i, j; |
376 | 351 | ||
377 | dprintk("acpi_cpufreq_early_init\n"); | 352 | dprintk("acpi_cpufreq_early_init\n"); |
@@ -380,17 +355,19 @@ static int acpi_cpufreq_early_init_acpi(void)
380 | data = kzalloc(sizeof(struct acpi_processor_performance), | 355 | data = kzalloc(sizeof(struct acpi_processor_performance), |
381 | GFP_KERNEL); | 356 | GFP_KERNEL); |
382 | if (!data) { | 357 | if (!data) { |
383 | for_each_possible_cpu(j) { | 358 | for_each_cpu_mask(j, covered) { |
384 | kfree(acpi_perf_data[j]); | 359 | kfree(acpi_perf_data[j]); |
385 | acpi_perf_data[j] = NULL; | 360 | acpi_perf_data[j] = NULL; |
386 | } | 361 | } |
387 | return (-ENOMEM); | 362 | return (-ENOMEM); |
388 | } | 363 | } |
389 | acpi_perf_data[i] = data; | 364 | acpi_perf_data[i] = data; |
365 | cpu_set(i, covered); | ||
390 | } | 366 | } |
391 | 367 | ||
392 | /* Do initialization in ACPI core */ | 368 | /* Do initialization in ACPI core */ |
393 | return acpi_processor_preregister_performance(acpi_perf_data); | 369 | acpi_processor_preregister_performance(acpi_perf_data); |
370 | return 0; | ||
394 | } | 371 | } |
395 | 372 | ||
396 | /* | 373 | /* |
@@ -424,11 +401,12 @@ static int
424 | acpi_cpufreq_cpu_init ( | 401 | acpi_cpufreq_cpu_init ( |
425 | struct cpufreq_policy *policy) | 402 | struct cpufreq_policy *policy) |
426 | { | 403 | { |
427 | unsigned int i; | 404 | unsigned int i; |
428 | unsigned int cpu = policy->cpu; | 405 | unsigned int valid_states = 0; |
429 | struct cpufreq_acpi_io *data; | 406 | unsigned int cpu = policy->cpu; |
430 | unsigned int result = 0; | 407 | struct acpi_cpufreq_data *data; |
431 | struct cpuinfo_x86 *c = &cpu_data[policy->cpu]; | 408 | unsigned int result = 0; |
409 | struct cpuinfo_x86 *c = &cpu_data[policy->cpu]; | ||
432 | struct acpi_processor_performance *perf; | 410 | struct acpi_processor_performance *perf; |
433 | 411 | ||
434 | dprintk("acpi_cpufreq_cpu_init\n"); | 412 | dprintk("acpi_cpufreq_cpu_init\n"); |
@@ -436,15 +414,18 @@ acpi_cpufreq_cpu_init (
436 | if (!acpi_perf_data[cpu]) | 414 | if (!acpi_perf_data[cpu]) |
437 | return (-ENODEV); | 415 | return (-ENODEV); |
438 | 416 | ||
439 | data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL); | 417 | data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL); |
440 | if (!data) | 418 | if (!data) |
441 | return (-ENOMEM); | 419 | return (-ENOMEM); |
442 | 420 | ||
443 | data->acpi_data = acpi_perf_data[cpu]; | 421 | data->acpi_data = acpi_perf_data[cpu]; |
444 | acpi_io_data[cpu] = data; | 422 | drv_data[cpu] = data; |
445 | 423 | ||
446 | result = acpi_processor_register_performance(data->acpi_data, cpu); | 424 | if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) { |
425 | acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS; | ||
426 | } | ||
447 | 427 | ||
428 | result = acpi_processor_register_performance(data->acpi_data, cpu); | ||
448 | if (result) | 429 | if (result) |
449 | goto err_free; | 430 | goto err_free; |
450 | 431 | ||
@@ -467,10 +448,6 @@ acpi_cpufreq_cpu_init (
467 | } | 448 | } |
468 | #endif | 449 | #endif |
469 | 450 | ||
470 | if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) { | ||
471 | acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS; | ||
472 | } | ||
473 | |||
474 | /* capability check */ | 451 | /* capability check */ |
475 | if (perf->state_count <= 1) { | 452 | if (perf->state_count <= 1) { |
476 | dprintk("No P-States\n"); | 453 | dprintk("No P-States\n"); |
@@ -478,16 +455,22 @@ acpi_cpufreq_cpu_init (
478 | goto err_unreg; | 455 | goto err_unreg; |
479 | } | 456 | } |
480 | 457 | ||
481 | if ((perf->control_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO) || | 458 | if (perf->control_register.space_id != perf->status_register.space_id) { |
482 | (perf->status_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO)) { | 459 | result = -ENODEV; |
483 | dprintk("Unsupported address space [%d, %d]\n", | 460 | goto err_unreg; |
484 | (u32) (perf->control_register.space_id), | 461 | } |
485 | (u32) (perf->status_register.space_id)); | 462 | |
463 | switch (perf->control_register.space_id) { | ||
464 | case ACPI_ADR_SPACE_SYSTEM_IO: | ||
465 | dprintk("SYSTEM IO addr space\n"); | ||
466 | break; | ||
467 | default: | ||
468 | dprintk("Unknown addr space %d\n", | ||
469 | (u32) (perf->control_register.space_id)); | ||
486 | result = -ENODEV; | 470 | result = -ENODEV; |
487 | goto err_unreg; | 471 | goto err_unreg; |
488 | } | 472 | } |
489 | 473 | ||
490 | /* alloc freq_table */ | ||
491 | data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * (perf->state_count + 1), GFP_KERNEL); | 474 | data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * (perf->state_count + 1), GFP_KERNEL); |
492 | if (!data->freq_table) { | 475 | if (!data->freq_table) { |
493 | result = -ENOMEM; | 476 | result = -ENOMEM; |
@@ -506,14 +489,18 @@ acpi_cpufreq_cpu_init (
506 | policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu); | 489 | policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu); |
507 | 490 | ||
508 | /* table init */ | 491 | /* table init */ |
509 | for (i=0; i<=perf->state_count; i++) | 492 | for (i=0; i<perf->state_count; i++) |
510 | { | 493 | { |
511 | data->freq_table[i].index = i; | 494 | if ( i > 0 && perf->states[i].core_frequency == |
512 | if (i<perf->state_count) | 495 | perf->states[i - 1].core_frequency) |
513 | data->freq_table[i].frequency = perf->states[i].core_frequency * 1000; | 496 | continue; |
514 | else | 497 | |
515 | data->freq_table[i].frequency = CPUFREQ_TABLE_END; | 498 | data->freq_table[valid_states].index = i; |
499 | data->freq_table[valid_states].frequency = | ||
500 | perf->states[i].core_frequency * 1000; | ||
501 | valid_states++; | ||
516 | } | 502 | } |
503 | data->freq_table[perf->state_count].frequency = CPUFREQ_TABLE_END; | ||
517 | 504 | ||
518 | result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); | 505 | result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); |
519 | if (result) { | 506 | if (result) { |
@@ -523,8 +510,7 @@ acpi_cpufreq_cpu_init (
523 | /* notify BIOS that we exist */ | 510 | /* notify BIOS that we exist */ |
524 | acpi_processor_notify_smm(THIS_MODULE); | 511 | acpi_processor_notify_smm(THIS_MODULE); |
525 | 512 | ||
526 | printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management activated.\n", | 513 | dprintk("CPU%u - ACPI performance management activated.\n", cpu); |
527 | cpu); | ||
528 | for (i = 0; i < perf->state_count; i++) | 514 | for (i = 0; i < perf->state_count; i++) |
529 | dprintk(" %cP%d: %d MHz, %d mW, %d uS\n", | 515 | dprintk(" %cP%d: %d MHz, %d mW, %d uS\n", |
530 | (i == perf->state?'*':' '), i, | 516 | (i == perf->state?'*':' '), i, |
@@ -540,7 +526,7 @@ acpi_cpufreq_cpu_init (
540 | */ | 526 | */ |
541 | data->resume = 1; | 527 | data->resume = 1; |
542 | 528 | ||
543 | return (result); | 529 | return result; |
544 | 530 | ||
545 | err_freqfree: | 531 | err_freqfree: |
546 | kfree(data->freq_table); | 532 | kfree(data->freq_table); |
@@ -548,7 +534,7 @@ acpi_cpufreq_cpu_init (
548 | acpi_processor_unregister_performance(perf, cpu); | 534 | acpi_processor_unregister_performance(perf, cpu); |
549 | err_free: | 535 | err_free: |
550 | kfree(data); | 536 | kfree(data); |
551 | acpi_io_data[cpu] = NULL; | 537 | drv_data[cpu] = NULL; |
552 | 538 | ||
553 | return (result); | 539 | return (result); |
554 | } | 540 | } |
@@ -558,14 +544,14 @@ static int
558 | acpi_cpufreq_cpu_exit ( | 544 | acpi_cpufreq_cpu_exit ( |
559 | struct cpufreq_policy *policy) | 545 | struct cpufreq_policy *policy) |
560 | { | 546 | { |
561 | struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; | 547 | struct acpi_cpufreq_data *data = drv_data[policy->cpu]; |
562 | 548 | ||
563 | 549 | ||
564 | dprintk("acpi_cpufreq_cpu_exit\n"); | 550 | dprintk("acpi_cpufreq_cpu_exit\n"); |
565 | 551 | ||
566 | if (data) { | 552 | if (data) { |
567 | cpufreq_frequency_table_put_attr(policy->cpu); | 553 | cpufreq_frequency_table_put_attr(policy->cpu); |
568 | acpi_io_data[policy->cpu] = NULL; | 554 | drv_data[policy->cpu] = NULL; |
569 | acpi_processor_unregister_performance(data->acpi_data, policy->cpu); | 555 | acpi_processor_unregister_performance(data->acpi_data, policy->cpu); |
570 | kfree(data); | 556 | kfree(data); |
571 | } | 557 | } |
@@ -577,7 +563,7 @@ static int
577 | acpi_cpufreq_resume ( | 563 | acpi_cpufreq_resume ( |
578 | struct cpufreq_policy *policy) | 564 | struct cpufreq_policy *policy) |
579 | { | 565 | { |
580 | struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; | 566 | struct acpi_cpufreq_data *data = drv_data[policy->cpu]; |
581 | 567 | ||
582 | 568 | ||
583 | dprintk("acpi_cpufreq_resume\n"); | 569 | dprintk("acpi_cpufreq_resume\n"); |
@@ -596,6 +582,7 @@ static struct freq_attr* acpi_cpufreq_attr[] = {
596 | static struct cpufreq_driver acpi_cpufreq_driver = { | 582 | static struct cpufreq_driver acpi_cpufreq_driver = { |
597 | .verify = acpi_cpufreq_verify, | 583 | .verify = acpi_cpufreq_verify, |
598 | .target = acpi_cpufreq_target, | 584 | .target = acpi_cpufreq_target, |
585 | .get = get_cur_freq_on_cpu, | ||
599 | .init = acpi_cpufreq_cpu_init, | 586 | .init = acpi_cpufreq_cpu_init, |
600 | .exit = acpi_cpufreq_cpu_exit, | 587 | .exit = acpi_cpufreq_cpu_exit, |
601 | .resume = acpi_cpufreq_resume, | 588 | .resume = acpi_cpufreq_resume, |
@@ -610,7 +597,7 @@ acpi_cpufreq_init (void)
610 | { | 597 | { |
611 | dprintk("acpi_cpufreq_init\n"); | 598 | dprintk("acpi_cpufreq_init\n"); |
612 | 599 | ||
613 | acpi_cpufreq_early_init_acpi(); | 600 | acpi_cpufreq_early_init(); |
614 | 601 | ||
615 | return cpufreq_register_driver(&acpi_cpufreq_driver); | 602 | return cpufreq_register_driver(&acpi_cpufreq_driver); |
616 | } | 603 | } |
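
With the mechanism isolated like this, the MSR path promised in the
changelog mostly amounts to teaching do_drv_read()/do_drv_write() about a
second command type and adding an ACPI_ADR_SPACE_FIXED_HARDWARE case to the
space_id switch in acpi_cpufreq_cpu_init(). The following is a hypothetical
sketch only: cmd->type, SYSTEM_IO_CAPABLE, SYSTEM_INTEL_MSR_CAPABLE, struct
msr_addr and INTEL_MSR_RANGE are illustrative names that do not exist in
this commit, and the real MSR support is expected to arrive in a later
patch of this series.

	/* Hypothetical: assumes struct drv_cmd gains a 'type' field and its
	 * 'addr' becomes a union of struct io_addr and struct msr_addr. */
	struct msr_addr {
		u32 reg;			/* e.g. the IA32 PERF_CTL MSR */
	};

	static void do_drv_write(struct drv_cmd *cmd)
	{
		u32 lo, hi;

		switch (cmd->type) {
		case SYSTEM_IO_CAPABLE:
			wrport(cmd->addr.io.port, cmd->addr.io.bit_width, cmd->val);
			break;
		case SYSTEM_INTEL_MSR_CAPABLE:
			/* read-modify-write only the P-state control bits */
			rdmsr(cmd->addr.msr.reg, lo, hi);
			lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
			wrmsr(cmd->addr.msr.reg, lo, hi);
			break;
		}
	}

The cross-CPU affinity handling stays in drv_read()/drv_write(), so a new
backend only has to supply the register access itself.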