author		Len Brown <len.brown@intel.com>		2007-11-20 01:20:31 -0500
committer	Len Brown <len.brown@intel.com>		2007-11-20 01:20:31 -0500
commit		614a6bbecceb97558819f18a676fd819ea61550b (patch)
tree		d3f59dc6ef0ffdb14d8047072e096fdabeaf7d2b
parent		c2e46d2e2a8e6ed17fac6154ac7e5fa7fe4efb28 (diff)
parent		f79f06ab9f86d7203006d2ec8992ac80df36a34e (diff)

Pull thermal into release branch
-rw-r--r--	arch/x86/kernel/acpi/processor.c	|   3
-rw-r--r--	drivers/acpi/osl.c			|  25
-rw-r--r--	drivers/acpi/processor_core.c		|  12
-rw-r--r--	drivers/acpi/processor_throttling.c	| 286

4 files changed, 244 insertions, 82 deletions
diff --git a/arch/x86/kernel/acpi/processor.c b/arch/x86/kernel/acpi/processor.c
index f63e5ff0aca1..a25db514c719 100644
--- a/arch/x86/kernel/acpi/processor.c
+++ b/arch/x86/kernel/acpi/processor.c
@@ -49,6 +49,9 @@ static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
 	if (cpu_has(c, X86_FEATURE_EST))
 		buf[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP;
 
+	if (cpu_has(c, X86_FEATURE_ACPI))
+		buf[2] |= ACPI_PDC_T_FFH;
+
 	obj->type = ACPI_TYPE_BUFFER;
 	obj->buffer.length = 12;
 	obj->buffer.pointer = (u8 *) buf;
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index aabc6ca4a81c..e3a673a00845 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -387,17 +387,14 @@ acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
 	if (!value)
 		value = &dummy;
 
-	switch (width) {
-	case 8:
+	*value = 0;
+	if (width <= 8) {
 		*(u8 *) value = inb(port);
-		break;
-	case 16:
+	} else if (width <= 16) {
 		*(u16 *) value = inw(port);
-		break;
-	case 32:
+	} else if (width <= 32) {
 		*(u32 *) value = inl(port);
-		break;
-	default:
+	} else {
 		BUG();
 	}
 
@@ -408,17 +405,13 @@ EXPORT_SYMBOL(acpi_os_read_port);
 
 acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
 {
-	switch (width) {
-	case 8:
+	if (width <= 8) {
 		outb(value, port);
-		break;
-	case 16:
+	} else if (width <= 16) {
 		outw(value, port);
-		break;
-	case 32:
+	} else if (width <= 32) {
 		outl(value, port);
-		break;
-	default:
+	} else {
 		BUG();
 	}
 
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 235a51e328c3..e93318bb029e 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -612,12 +612,6 @@ static int acpi_processor_get_info(struct acpi_processor *pr, unsigned has_uid)
 		request_region(pr->throttling.address, 6, "ACPI CPU throttle");
 	}
 
-#ifdef CONFIG_CPU_FREQ
-	acpi_processor_ppc_has_changed(pr);
-#endif
-	acpi_processor_get_throttling_info(pr);
-	acpi_processor_get_limit_info(pr);
-
 	return 0;
 }
 
@@ -665,6 +659,12 @@ static int __cpuinit acpi_processor_start(struct acpi_device *device)
 	/* _PDC call should be done before doing anything else (if reqd.). */
 	arch_acpi_processor_init_pdc(pr);
 	acpi_processor_set_pdc(pr);
+#ifdef CONFIG_CPU_FREQ
+	acpi_processor_ppc_has_changed(pr);
+#endif
+	acpi_processor_get_throttling_info(pr);
+	acpi_processor_get_limit_info(pr);
+
 
 	acpi_processor_power_init(pr, device);
 
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 0b8204e7082a..c26c61fb36c3 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -70,7 +70,55 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
 
 int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
 {
-	return acpi_processor_get_platform_limit(pr);
+	int result = 0;
+	int throttling_limit;
+	int current_state;
+	struct acpi_processor_limit *limit;
+	int target_state;
+
+	result = acpi_processor_get_platform_limit(pr);
+	if (result) {
+		/* Throttling Limit is unsupported */
+		return result;
+	}
+
+	throttling_limit = pr->throttling_platform_limit;
+	if (throttling_limit >= pr->throttling.state_count) {
+		/* Incorrect Throttling Limit */
+		return -EINVAL;
+	}
+
+	current_state = pr->throttling.state;
+	if (current_state > throttling_limit) {
+		/*
+		 * The current state can meet the requirement of
+		 * the _TPC limit.  But it is reasonable that OSPM changes
+		 * t-states from high to low for better performance.
+		 * Of course the limit conditions of thermal
+		 * and user should be considered.
+		 */
+		limit = &pr->limit;
+		target_state = throttling_limit;
+		if (limit->thermal.tx > target_state)
+			target_state = limit->thermal.tx;
+		if (limit->user.tx > target_state)
+			target_state = limit->user.tx;
+	} else if (current_state == throttling_limit) {
+		/*
+		 * Unnecessary to change the throttling state
+		 */
+		return 0;
+	} else {
+		/*
+		 * If the current state is lower than the limit of _TPC, it
+		 * will be forced to switch to the throttling state defined
+		 * by throttling_platform_limit.
+		 * Because the previous state meets the limit conditions
+		 * of thermal and user, it is unnecessary to check it again.
+		 */
+		target_state = throttling_limit;
+	}
+	return acpi_processor_set_throttling(pr, target_state);
 }
 
 /*
@@ -83,6 +131,7 @@ static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *ptc = NULL;
 	union acpi_object obj = { 0 };
+	struct acpi_processor_throttling *throttling;
 
 	status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
 	if (ACPI_FAILURE(status)) {
@@ -134,6 +183,22 @@ static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
 	memcpy(&pr->throttling.status_register, obj.buffer.pointer,
 	       sizeof(struct acpi_ptc_register));
 
+	throttling = &pr->throttling;
+
+	if ((throttling->control_register.bit_width +
+	     throttling->control_register.bit_offset) > 32) {
+		printk(KERN_ERR PREFIX "Invalid _PTC control register\n");
+		result = -EFAULT;
+		goto end;
+	}
+
+	if ((throttling->status_register.bit_width +
+	     throttling->status_register.bit_offset) > 32) {
+		printk(KERN_ERR PREFIX "Invalid _PTC status register\n");
+		result = -EFAULT;
+		goto end;
+	}
+
       end:
 	kfree(buffer.pointer);
 
@@ -328,44 +393,132 @@ static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
 	return 0;
 }
 
-static int acpi_read_throttling_status(struct acpi_processor_throttling
-				       *throttling)
+#ifdef CONFIG_X86
+static int acpi_throttling_rdmsr(struct acpi_processor *pr,
+		acpi_integer * value)
 {
-	int value = -1;
+	struct cpuinfo_x86 *c;
+	u64 msr_high, msr_low;
+	unsigned int cpu;
+	u64 msr = 0;
+	int ret = -1;
+
+	cpu = pr->id;
+	c = &cpu_data(cpu);
+
+	if ((c->x86_vendor != X86_VENDOR_INTEL) ||
+	    !cpu_has(c, X86_FEATURE_ACPI)) {
+		printk(KERN_ERR PREFIX
+		       "HARDWARE addr space,NOT supported yet\n");
+	} else {
+		msr_low = 0;
+		msr_high = 0;
+		rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL,
+			(u32 *)&msr_low , (u32 *) &msr_high);
+		msr = (msr_high << 32) | msr_low;
+		*value = (acpi_integer) msr;
+		ret = 0;
+	}
+	return ret;
+}
+
+static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value)
+{
+	struct cpuinfo_x86 *c;
+	unsigned int cpu;
+	int ret = -1;
+	u64 msr;
+
+	cpu = pr->id;
+	c = &cpu_data(cpu);
+
+	if ((c->x86_vendor != X86_VENDOR_INTEL) ||
+	    !cpu_has(c, X86_FEATURE_ACPI)) {
+		printk(KERN_ERR PREFIX
+		       "HARDWARE addr space,NOT supported yet\n");
+	} else {
+		msr = value;
+		wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL,
+			msr & 0xffffffff, msr >> 32);
+		ret = 0;
+	}
+	return ret;
+}
+#else
+static int acpi_throttling_rdmsr(struct acpi_processor *pr,
+		acpi_integer * value)
+{
+	printk(KERN_ERR PREFIX
+		"HARDWARE addr space,NOT supported yet\n");
+	return -1;
+}
+
+static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value)
+{
+	printk(KERN_ERR PREFIX
+		"HARDWARE addr space,NOT supported yet\n");
+	return -1;
+}
+#endif
+
+static int acpi_read_throttling_status(struct acpi_processor *pr,
+					acpi_integer *value)
+{
+	u32 bit_width, bit_offset;
+	u64 ptc_value;
+	u64 ptc_mask;
+	struct acpi_processor_throttling *throttling;
+	int ret = -1;
+
+	throttling = &pr->throttling;
 	switch (throttling->status_register.space_id) {
 	case ACPI_ADR_SPACE_SYSTEM_IO:
+		ptc_value = 0;
+		bit_width = throttling->status_register.bit_width;
+		bit_offset = throttling->status_register.bit_offset;
+
 		acpi_os_read_port((acpi_io_address) throttling->status_register.
-				  address, &value,
-				  (u32) throttling->status_register.bit_width *
-				  8);
+				  address, (u32 *) &ptc_value,
+				  (u32) (bit_width + bit_offset));
+		ptc_mask = (1 << bit_width) - 1;
+		*value = (acpi_integer) ((ptc_value >> bit_offset) & ptc_mask);
+		ret = 0;
 		break;
 	case ACPI_ADR_SPACE_FIXED_HARDWARE:
-		printk(KERN_ERR PREFIX
-		       "HARDWARE addr space,NOT supported yet\n");
+		ret = acpi_throttling_rdmsr(pr, value);
 		break;
 	default:
 		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
 		       (u32) (throttling->status_register.space_id));
 	}
-	return value;
+	return ret;
 }
 
-static int acpi_write_throttling_state(struct acpi_processor_throttling
-				       *throttling, int value)
+static int acpi_write_throttling_state(struct acpi_processor *pr,
+				acpi_integer value)
 {
+	u32 bit_width, bit_offset;
+	u64 ptc_value;
+	u64 ptc_mask;
+	struct acpi_processor_throttling *throttling;
 	int ret = -1;
 
+	throttling = &pr->throttling;
 	switch (throttling->control_register.space_id) {
 	case ACPI_ADR_SPACE_SYSTEM_IO:
+		bit_width = throttling->control_register.bit_width;
+		bit_offset = throttling->control_register.bit_offset;
+		ptc_mask = (1 << bit_width) - 1;
+		ptc_value = value & ptc_mask;
+
 		acpi_os_write_port((acpi_io_address) throttling->
-				   control_register.address, value,
-				   (u32) throttling->control_register.
-				   bit_width * 8);
+				   control_register.address,
+				   (u32) (ptc_value << bit_offset),
+				   (u32) (bit_width + bit_offset));
 		ret = 0;
 		break;
 	case ACPI_ADR_SPACE_FIXED_HARDWARE:
-		printk(KERN_ERR PREFIX
-		       "HARDWARE addr space,NOT supported yet\n");
+		ret = acpi_throttling_wrmsr(pr, value);
 		break;
 	default:
 		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
@@ -374,7 +527,8 @@ static int acpi_write_throttling_state(struct acpi_processor_throttling
 	return ret;
 }
 
-static int acpi_get_throttling_state(struct acpi_processor *pr, int value)
+static int acpi_get_throttling_state(struct acpi_processor *pr,
+				acpi_integer value)
 {
 	int i;
 
@@ -390,22 +544,26 @@ static int acpi_get_throttling_state(struct acpi_processor *pr, int value)
 	return i;
 }
 
-static int acpi_get_throttling_value(struct acpi_processor *pr, int state)
+static int acpi_get_throttling_value(struct acpi_processor *pr,
+			int state, acpi_integer *value)
 {
-	int value = -1;
+	int ret = -1;
+
 	if (state >= 0 && state <= pr->throttling.state_count) {
 		struct acpi_processor_tx_tss *tx =
 		    (struct acpi_processor_tx_tss *)&(pr->throttling.
 						      states_tss[state]);
-		value = tx->control;
+		*value = tx->control;
+		ret = 0;
 	}
-	return value;
+	return ret;
 }
 
 static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
 {
 	int state = 0;
-	u32 value = 0;
+	int ret;
+	acpi_integer value;
 
 	if (!pr)
 		return -EINVAL;
@@ -415,8 +573,9 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
 
 	pr->throttling.state = 0;
 	local_irq_disable();
-	value = acpi_read_throttling_status(&pr->throttling);
-	if (value >= 0) {
+	value = 0;
+	ret = acpi_read_throttling_status(pr, &value);
+	if (ret >= 0) {
 		state = acpi_get_throttling_state(pr, value);
 		pr->throttling.state = state;
 	}
@@ -430,6 +589,40 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
 	return pr->throttling.acpi_processor_get_throttling(pr);
 }
 
+static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
+{
+	int i, step;
+
+	if (!pr->throttling.address) {
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
+		return -EINVAL;
+	} else if (!pr->throttling.duty_width) {
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
+		return -EINVAL;
+	}
+	/* TBD: Support duty_cycle values that span bit 4. */
+	else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
+		printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
+		return -EINVAL;
+	}
+
+	pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;
+
+	/*
+	 * Compute state values. Note that throttling displays a linear power
+	 * performance relationship (at 50% performance the CPU will consume
+	 * 50% power).  Values are in 1/10th of a percent to preserve accuracy.
+	 */
+
+	step = (1000 / pr->throttling.state_count);
+
+	for (i = 0; i < pr->throttling.state_count; i++) {
+		pr->throttling.states[i].performance = 1000 - step * i;
+		pr->throttling.states[i].power = 1000 - step * i;
+	}
+	return 0;
+}
+
 static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
 					      int state)
 {
@@ -506,7 +699,8 @@ static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
 static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
 					      int state)
 {
-	u32 value = 0;
+	int ret;
+	acpi_integer value;
 
 	if (!pr)
 		return -EINVAL;
@@ -524,10 +718,10 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
 		return -EPERM;
 
 	local_irq_disable();
-
-	value = acpi_get_throttling_value(pr, state);
-	if (value >= 0) {
-		acpi_write_throttling_state(&pr->throttling, value);
+	value = 0;
+	ret = acpi_get_throttling_value(pr, state, &value);
+	if (ret >= 0) {
+		acpi_write_throttling_state(pr, value);
 		pr->throttling.state = state;
 	}
 	local_irq_enable();
@@ -543,8 +737,6 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 int acpi_processor_get_throttling_info(struct acpi_processor *pr)
 {
 	int result = 0;
-	int step = 0;
-	int i = 0;
 
 	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 			  "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
@@ -563,6 +755,8 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
 	    acpi_processor_get_throttling_states(pr) ||
 	    acpi_processor_get_platform_limit(pr))
 	{
+		if (acpi_processor_get_fadt_info(pr))
+			return 0;
 		pr->throttling.acpi_processor_get_throttling =
 		    &acpi_processor_get_throttling_fadt;
 		pr->throttling.acpi_processor_set_throttling =
@@ -576,19 +770,6 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
 
 	acpi_processor_get_tsd(pr);
 
-	if (!pr->throttling.address) {
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
-		return 0;
-	} else if (!pr->throttling.duty_width) {
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
-		return 0;
-	}
-	/* TBD: Support duty_cycle values that span bit 4. */
-	else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
-		printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
-		return 0;
-	}
-
 	/*
 	 * PIIX4 Errata: We don't support throttling on the original PIIX4.
 	 * This shouldn't be an issue as few (if any) mobile systems ever
@@ -600,21 +781,6 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
 		return 0;
 	}
 
-	pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;
-
-	/*
-	 * Compute state values. Note that throttling displays a linear power/
-	 * performance relationship (at 50% performance the CPU will consume
-	 * 50% power). Values are in 1/10th of a percent to preserve accuracy.
-	 */
-
-	step = (1000 / pr->throttling.state_count);
-
-	for (i = 0; i < pr->throttling.state_count; i++) {
-		pr->throttling.states[i].performance = step * i;
-		pr->throttling.states[i].power = step * i;
-	}
-
 	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
 			  pr->throttling.state_count));
 
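
The following is a minimal, self-contained userspace sketch (not part of the commit) of the shift-and-mask handling that the new SYSTEM_IO read and write paths above apply to the _PTC status/control fields; the field geometry (bit_width, bit_offset) and raw values used here are hypothetical.

/*
 * Standalone illustration only: mirrors the (raw >> bit_offset) & mask
 * extraction used on the read path and the (value & mask) << bit_offset
 * encoding used on the write path in the patch above.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t extract_tstate(uint64_t raw, uint32_t bit_width, uint32_t bit_offset)
{
	uint64_t ptc_mask = (1ULL << bit_width) - 1;	/* e.g. a 4-bit duty field */

	return (raw >> bit_offset) & ptc_mask;		/* read path */
}

static uint64_t encode_tstate(uint64_t value, uint32_t bit_width, uint32_t bit_offset)
{
	uint64_t ptc_mask = (1ULL << bit_width) - 1;

	return (value & ptc_mask) << bit_offset;	/* write path */
}

int main(void)
{
	/* hypothetical 4-bit field at bit offset 1, raw port value 0x1a */
	printf("decoded T-state control value: %llu\n",
	       (unsigned long long)extract_tstate(0x1a, 4, 1));
	printf("encoded register value: 0x%llx\n",
	       (unsigned long long)encode_tstate(0xd, 4, 1));
	return 0;
}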