author	Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>	2005-12-14 15:05:00 -0500
committer	Len Brown <len.brown@intel.com>	2006-02-09 03:21:49 -0500
commit	09b4d1ee881c8593bfad2a42f838d85070365c3e (patch)
tree	7576d293ed3d14efcde7b88dcc352041c7dce7d1 /arch/i386
parent	3b2d99429e3386b6e2ac949fc72486509c8bbe36 (diff)
P-state software coordination for acpi-cpufreq
http://bugzilla.kernel.org/show_bug.cgi?id=5737

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'arch/i386')
-rw-r--r--	arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c	285
1 file changed, 197 insertions(+), 88 deletions(-)
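The substance of the patch is the coordination loop added to acpi_cpufreq_target() (the @@ -241,11 +217,91 @@ hunk below): a requested P-state change is now applied either to one CPU or to every online CPU in the policy's domain, depending on policy->shared_type, and a partially applied transition is rolled back. The following is a rough, standalone model of that control flow, not kernel code: the helper names, the plain arrays standing in for cpumask_t, and the injected fault are all illustrative, and the real code additionally migrates itself with set_cpus_allowed() and posts cpufreq PRECHANGE/POSTCHANGE notifications.

/*
 * Standalone sketch of the per-domain coordination introduced by this
 * patch.  NOT kernel code: cpumask handling, locking and CPU migration
 * are replaced by plain arrays so the control flow (write one CPU or
 * every CPU in the domain, remember which CPUs were touched, undo on
 * failure) can be compiled and run anywhere.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

enum shared_type { SHARED_TYPE_ALL, SHARED_TYPE_ANY };

static int cpu_pstate[NR_CPUS];		/* current P-state per CPU */

/* Stand-in for acpi_processor_set_performance(); fails on CPU 2 on demand. */
static int set_performance(int cpu, int state, bool inject_fault)
{
	if (inject_fault && cpu == 2)
		return -1;
	cpu_pstate[cpu] = state;
	return 0;
}

/*
 * Mirrors the patched acpi_cpufreq_target(): with SHARED_TYPE_ANY one
 * write covers the whole domain, otherwise every online CPU in the
 * domain is written, and a partial failure is undone on the CPUs that
 * were already covered.
 */
static int coordinate_transition(const bool *online, enum shared_type type,
				 int cur_state, int next_state,
				 bool inject_fault)
{
	bool covered[NR_CPUS] = { false };
	int result = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!online[cpu])
			continue;

		result = set_performance(cpu, next_state, inject_fault);
		if (result)
			break;

		if (type == SHARED_TYPE_ANY)	/* one write is enough */
			break;

		covered[cpu] = true;
	}

	if (result) {
		/* Best-effort undo, as in the patch's error path. */
		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			if (covered[cpu])
				set_performance(cpu, cur_state, false);
	}
	return result;
}

int main(void)
{
	const bool online[NR_CPUS] = { true, true, true, true };

	coordinate_transition(online, SHARED_TYPE_ALL, 0, 2, false);
	printf("clean run : P%d P%d P%d P%d\n",
	       cpu_pstate[0], cpu_pstate[1], cpu_pstate[2], cpu_pstate[3]);

	coordinate_transition(online, SHARED_TYPE_ALL, 2, 3, true);
	printf("faulty run: P%d P%d P%d P%d (rolled back)\n",
	       cpu_pstate[0], cpu_pstate[1], cpu_pstate[2], cpu_pstate[3]);
	return 0;
}

Built with any C99 compiler, the second run shows CPUs 0 and 1 being written and then restored to the previous P-state after the simulated failure on CPU 2, which is the behaviour the error path in the hunk below aims for.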
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 3852d0a4c1b5..4c7c6e089e87 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -48,12 +48,13 @@ MODULE_LICENSE("GPL");
 
 
 struct cpufreq_acpi_io {
-	struct acpi_processor_performance	acpi_data;
+	struct acpi_processor_performance	*acpi_data;
 	struct cpufreq_frequency_table		*freq_table;
 	unsigned int				resume;
 };
 
 static struct cpufreq_acpi_io	*acpi_io_data[NR_CPUS];
+static struct acpi_processor_performance	*acpi_perf_data[NR_CPUS];
 
 static struct cpufreq_driver acpi_cpufreq_driver;
 
@@ -104,64 +105,43 @@ acpi_processor_set_performance (
 {
 	u16			port = 0;
 	u8			bit_width = 0;
+	int			i = 0;
 	int			ret = 0;
 	u32			value = 0;
-	int			i = 0;
-	struct cpufreq_freqs    cpufreq_freqs;
-	cpumask_t		saved_mask;
 	int			retval;
+	struct acpi_processor_performance	*perf;
 
 	dprintk("acpi_processor_set_performance\n");
 
-	/*
-	 * TBD: Use something other than set_cpus_allowed.
-	 * As set_cpus_allowed is a bit racy,
-	 * with any other set_cpus_allowed for this process.
-	 */
-	saved_mask = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
-	if (smp_processor_id() != cpu) {
-		return (-EAGAIN);
-	}
-
-	if (state == data->acpi_data.state) {
+	retval = 0;
+	perf = data->acpi_data;
+	if (state == perf->state) {
 		if (unlikely(data->resume)) {
 			dprintk("Called after resume, resetting to P%d\n", state);
 			data->resume = 0;
 		} else {
 			dprintk("Already at target state (P%d)\n", state);
-			retval = 0;
-			goto migrate_end;
+			return (retval);
 		}
 	}
 
-	dprintk("Transitioning from P%d to P%d\n",
-		data->acpi_data.state, state);
-
-	/* cpufreq frequency struct */
-	cpufreq_freqs.cpu = cpu;
-	cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency;
-	cpufreq_freqs.new = data->freq_table[state].frequency;
-
-	/* notify cpufreq */
-	cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE);
+	dprintk("Transitioning from P%d to P%d\n", perf->state, state);
 
 	/*
 	 * First we write the target state's 'control' value to the
 	 * control_register.
 	 */
 
-	port = data->acpi_data.control_register.address;
-	bit_width = data->acpi_data.control_register.bit_width;
-	value = (u32) data->acpi_data.states[state].control;
+	port = perf->control_register.address;
+	bit_width = perf->control_register.bit_width;
+	value = (u32) perf->states[state].control;
 
 	dprintk("Writing 0x%08x to port 0x%04x\n", value, port);
 
 	ret = acpi_processor_write_port(port, bit_width, value);
 	if (ret) {
 		dprintk("Invalid port width 0x%04x\n", bit_width);
-		retval = ret;
-		goto migrate_end;
+		return (ret);
 	}
 
 	/*
@@ -177,49 +157,36 @@ acpi_processor_set_performance (
 		 * before giving up.
 		 */
 
-		port = data->acpi_data.status_register.address;
-		bit_width = data->acpi_data.status_register.bit_width;
+		port = perf->status_register.address;
+		bit_width = perf->status_register.bit_width;
 
 		dprintk("Looking for 0x%08x from port 0x%04x\n",
-			(u32) data->acpi_data.states[state].status, port);
+			(u32) perf->states[state].status, port);
 
-		for (i=0; i<100; i++) {
+		for (i = 0; i < 100; i++) {
 			ret = acpi_processor_read_port(port, bit_width, &value);
 			if (ret) {
 				dprintk("Invalid port width 0x%04x\n", bit_width);
-				retval = ret;
-				goto migrate_end;
+				return (ret);
 			}
-			if (value == (u32) data->acpi_data.states[state].status)
+			if (value == (u32) perf->states[state].status)
 				break;
 			udelay(10);
 		}
 	} else {
 		i = 0;
-		value = (u32) data->acpi_data.states[state].status;
+		value = (u32) perf->states[state].status;
 	}
 
-	/* notify cpufreq */
-	cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
-
-	if (unlikely(value != (u32) data->acpi_data.states[state].status)) {
-		unsigned int tmp = cpufreq_freqs.new;
-		cpufreq_freqs.new = cpufreq_freqs.old;
-		cpufreq_freqs.old = tmp;
-		cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE);
-		cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
+	if (unlikely(value != (u32) perf->states[state].status)) {
 		printk(KERN_WARNING "acpi-cpufreq: Transition failed\n");
 		retval = -ENODEV;
-		goto migrate_end;
+		return (retval);
 	}
 
 	dprintk("Transition successful after %d microseconds\n", i * 10);
 
-	data->acpi_data.state = state;
-
-	retval = 0;
-migrate_end:
-	set_cpus_allowed(current, saved_mask);
+	perf->state = state;
 	return (retval);
 }
 
@@ -231,8 +198,17 @@ acpi_cpufreq_target (
 	unsigned int relation)
 {
 	struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
+	struct acpi_processor_performance *perf;
+	struct cpufreq_freqs freqs;
+	cpumask_t online_policy_cpus;
+	cpumask_t saved_mask;
+	cpumask_t set_mask;
+	cpumask_t covered_cpus;
+	unsigned int cur_state = 0;
 	unsigned int next_state = 0;
 	unsigned int result = 0;
+	unsigned int j;
+	unsigned int tmp;
 
 	dprintk("acpi_cpufreq_setpolicy\n");
 
@@ -241,11 +217,91 @@ acpi_cpufreq_target (
 			target_freq,
 			relation,
 			&next_state);
-	if (result)
+	if (unlikely(result))
 		return (result);
 
-	result = acpi_processor_set_performance (data, policy->cpu, next_state);
+	perf = data->acpi_data;
+	cur_state = perf->state;
+	freqs.old = data->freq_table[cur_state].frequency;
+	freqs.new = data->freq_table[next_state].frequency;
+
+	/* cpufreq holds the hotplug lock, so we are safe from here on */
+	cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
 
+	for_each_cpu_mask(j, online_policy_cpus) {
+		freqs.cpu = j;
+		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	}
+
+	/*
+	 * We need to call driver->target() on all or any CPU in
+	 * policy->cpus, depending on policy->shared_type.
+	 */
+	saved_mask = current->cpus_allowed;
+	cpus_clear(covered_cpus);
+	for_each_cpu_mask(j, online_policy_cpus) {
+		/*
+		 * Support for SMP systems.
+		 * Make sure we are running on CPU that wants to change freq
+		 */
+		cpus_clear(set_mask);
+		if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+			cpus_or(set_mask, set_mask, online_policy_cpus);
+		else
+			cpu_set(j, set_mask);
+
+		set_cpus_allowed(current, set_mask);
+		if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
+			dprintk("couldn't limit to CPUs in this domain\n");
+			result = -EAGAIN;
+			break;
+		}
+
+		result = acpi_processor_set_performance (data, j, next_state);
+		if (result) {
+			result = -EAGAIN;
+			break;
+		}
+
+		if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+			break;
+
+		cpu_set(j, covered_cpus);
+	}
+
+	for_each_cpu_mask(j, online_policy_cpus) {
+		freqs.cpu = j;
+		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	}
+
+	if (unlikely(result)) {
+		/*
+		 * We have failed halfway through the frequency change.
+		 * We have sent callbacks to online_policy_cpus and
+		 * acpi_processor_set_performance() has been called on
+		 * coverd_cpus. Best effort undo..
+		 */
+
+		if (!cpus_empty(covered_cpus)) {
+			for_each_cpu_mask(j, covered_cpus) {
+				policy->cpu = j;
+				acpi_processor_set_performance (data,
+								j,
+								cur_state);
+			}
+		}
+
+		tmp = freqs.new;
+		freqs.new = freqs.old;
+		freqs.old = tmp;
+		for_each_cpu_mask(j, online_policy_cpus) {
+			freqs.cpu = j;
+			cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+			cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+		}
+	}
+
+	set_cpus_allowed(current, saved_mask);
 	return (result);
 }
 
@@ -271,30 +327,65 @@ acpi_cpufreq_guess_freq (
 	struct cpufreq_acpi_io	*data,
 	unsigned int		cpu)
 {
+	struct acpi_processor_performance	*perf = data->acpi_data;
+
 	if (cpu_khz) {
 		/* search the closest match to cpu_khz */
 		unsigned int i;
 		unsigned long freq;
-		unsigned long freqn = data->acpi_data.states[0].core_frequency * 1000;
+		unsigned long freqn = perf->states[0].core_frequency * 1000;
 
-		for (i=0; i < (data->acpi_data.state_count - 1); i++) {
+		for (i = 0; i < (perf->state_count - 1); i++) {
 			freq = freqn;
-			freqn = data->acpi_data.states[i+1].core_frequency * 1000;
+			freqn = perf->states[i+1].core_frequency * 1000;
 			if ((2 * cpu_khz) > (freqn + freq)) {
-				data->acpi_data.state = i;
+				perf->state = i;
 				return (freq);
 			}
 		}
-		data->acpi_data.state = data->acpi_data.state_count - 1;
+		perf->state = perf->state_count - 1;
 		return (freqn);
-	} else
+	} else {
 		/* assume CPU is at P0... */
-		data->acpi_data.state = 0;
-		return data->acpi_data.states[0].core_frequency * 1000;
-
+		perf->state = 0;
+		return perf->states[0].core_frequency * 1000;
+	}
 }
 
 
+/*
+ * acpi_cpufreq_early_init - initialize ACPI P-States library
+ *
+ * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
+ * in order to determine correct frequency and voltage pairings. We can
+ * do _PDC and _PSD and find out the processor dependency for the
+ * actual init that will happen later...
+ */
+static int acpi_cpufreq_early_init_acpi(void)
+{
+	struct acpi_processor_performance	*data;
+	unsigned int				i, j;
+
+	dprintk("acpi_cpufreq_early_init\n");
+
+	for_each_cpu(i) {
+		data = kzalloc(sizeof(struct acpi_processor_performance),
+			GFP_KERNEL);
+		if (!data) {
+			for_each_cpu(j) {
+				kfree(acpi_perf_data[j]);
+				acpi_perf_data[j] = NULL;
+			}
+			return (-ENOMEM);
+		}
+		acpi_perf_data[i] = data;
+	}
+
+	/* Do initialization in ACPI core */
+	acpi_processor_preregister_performance(acpi_perf_data);
+	return 0;
+}
+
 static int
 acpi_cpufreq_cpu_init (
 	struct cpufreq_policy *policy)
@@ -304,41 +395,51 @@ acpi_cpufreq_cpu_init (
 	struct cpufreq_acpi_io	*data;
 	unsigned int result = 0;
 	struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
+	struct acpi_processor_performance	*perf;
 
 	dprintk("acpi_cpufreq_cpu_init\n");
 
+	if (!acpi_perf_data[cpu])
+		return (-ENODEV);
+
 	data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
 	if (!data)
 		return (-ENOMEM);
 
+	data->acpi_data = acpi_perf_data[cpu];
 	acpi_io_data[cpu] = data;
 
-	result = acpi_processor_register_performance(&data->acpi_data, cpu);
+	result = acpi_processor_register_performance(data->acpi_data, cpu);
 
 	if (result)
 		goto err_free;
 
+	perf = data->acpi_data;
+	policy->cpus = perf->shared_cpu_map;
+	policy->shared_type = perf->shared_type;
+
 	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
 		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
 	}
 
 	/* capability check */
-	if (data->acpi_data.state_count <= 1) {
+	if (perf->state_count <= 1) {
 		dprintk("No P-States\n");
 		result = -ENODEV;
 		goto err_unreg;
 	}
-	if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO) ||
-	    (data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
+
+	if ((perf->control_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO) ||
+	    (perf->status_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
 		dprintk("Unsupported address space [%d, %d]\n",
-			(u32) (data->acpi_data.control_register.space_id),
-			(u32) (data->acpi_data.status_register.space_id));
+			(u32) (perf->control_register.space_id),
+			(u32) (perf->status_register.space_id));
 		result = -ENODEV;
 		goto err_unreg;
 	}
 
 	/* alloc freq_table */
-	data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * (data->acpi_data.state_count + 1), GFP_KERNEL);
+	data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * (perf->state_count + 1), GFP_KERNEL);
 	if (!data->freq_table) {
 		result = -ENOMEM;
 		goto err_unreg;
@@ -346,9 +447,9 @@ acpi_cpufreq_cpu_init (
 
 	/* detect transition latency */
 	policy->cpuinfo.transition_latency = 0;
-	for (i=0; i<data->acpi_data.state_count; i++) {
-		if ((data->acpi_data.states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency)
-			policy->cpuinfo.transition_latency = data->acpi_data.states[i].transition_latency * 1000;
+	for (i=0; i<perf->state_count; i++) {
+		if ((perf->states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency)
+			policy->cpuinfo.transition_latency = perf->states[i].transition_latency * 1000;
 	}
 	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
 
@@ -356,11 +457,11 @@ acpi_cpufreq_cpu_init (
 	policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
 
 	/* table init */
-	for (i=0; i<=data->acpi_data.state_count; i++)
+	for (i=0; i<=perf->state_count; i++)
 	{
 		data->freq_table[i].index = i;
-		if (i<data->acpi_data.state_count)
-			data->freq_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000;
+		if (i<perf->state_count)
+			data->freq_table[i].frequency = perf->states[i].core_frequency * 1000;
 		else
 			data->freq_table[i].frequency = CPUFREQ_TABLE_END;
 	}
@@ -375,12 +476,12 @@ acpi_cpufreq_cpu_init (
 
 	printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management activated.\n",
 	       cpu);
-	for (i = 0; i < data->acpi_data.state_count; i++)
+	for (i = 0; i < perf->state_count; i++)
 		dprintk("     %cP%d: %d MHz, %d mW, %d uS\n",
-			(i == data->acpi_data.state?'*':' '), i,
-			(u32) data->acpi_data.states[i].core_frequency,
-			(u32) data->acpi_data.states[i].power,
-			(u32) data->acpi_data.states[i].transition_latency);
+			(i == perf->state?'*':' '), i,
+			(u32) perf->states[i].core_frequency,
+			(u32) perf->states[i].power,
+			(u32) perf->states[i].transition_latency);
 
 	cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
 
@@ -395,7 +496,7 @@ acpi_cpufreq_cpu_init (
  err_freqfree:
 	kfree(data->freq_table);
  err_unreg:
-	acpi_processor_unregister_performance(&data->acpi_data, cpu);
+	acpi_processor_unregister_performance(perf, cpu);
  err_free:
 	kfree(data);
 	acpi_io_data[cpu] = NULL;
@@ -416,7 +517,7 @@ acpi_cpufreq_cpu_exit (
 	if (data) {
 		cpufreq_frequency_table_put_attr(policy->cpu);
 		acpi_io_data[policy->cpu] = NULL;
-		acpi_processor_unregister_performance(&data->acpi_data, policy->cpu);
+		acpi_processor_unregister_performance(data->acpi_data, policy->cpu);
 		kfree(data);
 	}
 
@@ -462,7 +563,10 @@ acpi_cpufreq_init (void)
 
 	dprintk("acpi_cpufreq_init\n");
 
-	result = cpufreq_register_driver(&acpi_cpufreq_driver);
+	result = acpi_cpufreq_early_init_acpi();
+
+	if (!result)
+		result = cpufreq_register_driver(&acpi_cpufreq_driver);
 
 	return (result);
 }
@@ -471,10 +575,15 @@ acpi_cpufreq_init (void)
 static void __exit
 acpi_cpufreq_exit (void)
 {
+	unsigned int i;
 	dprintk("acpi_cpufreq_exit\n");
 
 	cpufreq_unregister_driver(&acpi_cpufreq_driver);
 
+	for_each_cpu(i) {
+		kfree(acpi_perf_data[i]);
+		acpi_perf_data[i] = NULL;
+	}
 	return;
 }
 