Diffstat (limited to 'drivers/acpi/processor_idle.c')
-rw-r--r--	drivers/acpi/processor_idle.c	129
1 file changed, 58 insertions(+), 71 deletions(-)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 8a74bf3efd8e..e439eb77d283 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
  * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
- * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
+ * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
  * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  *  - Added processor hotplug support
  * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
@@ -99,6 +99,9 @@ static int set_max_cstate(struct dmi_system_id *id)
 static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
 	{ set_max_cstate, "IBM ThinkPad R40e", {
 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+	  DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1},
+	{ set_max_cstate, "IBM ThinkPad R40e", {
+	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
 	{ set_max_cstate, "IBM ThinkPad R40e", {
 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
@@ -261,21 +264,15 @@ static void acpi_processor_idle(void)
 		u32 bm_status = 0;
 		unsigned long diff = jiffies - pr->power.bm_check_timestamp;
 
-		if (diff > 32)
-			diff = 32;
+		if (diff > 31)
+			diff = 31;
 
-		while (diff) {
-			/* if we didn't get called, assume there was busmaster activity */
-			diff--;
-			if (diff)
-				pr->power.bm_activity |= 0x1;
-			pr->power.bm_activity <<= 1;
-		}
+		pr->power.bm_activity <<= diff;
 
 		acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS,
 				  &bm_status, ACPI_MTX_DO_NOT_LOCK);
 		if (bm_status) {
-			pr->power.bm_activity++;
+			pr->power.bm_activity |= 0x1;
 			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS,
 					  1, ACPI_MTX_DO_NOT_LOCK);
 		}
@@ -287,16 +284,16 @@ static void acpi_processor_idle(void)
 		else if (errata.piix4.bmisx) {
 			if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
 			    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
-				pr->power.bm_activity++;
+				pr->power.bm_activity |= 0x1;
 		}
 
 		pr->power.bm_check_timestamp = jiffies;
 
 		/*
-		 * Apply bus mastering demotion policy.  Automatically demote
+		 * If bus mastering is or was active this jiffy, demote
 		 * to avoid a faulty transition.  Note that the processor
 		 * won't enter a low-power state during this call (to this
-		 * funciton) but should upon the next.
+		 * function) but should upon the next.
 		 *
 		 * TBD: A better policy might be to fallback to the demotion
 		 *      state (use it for this quantum only) istead of
@@ -304,7 +301,8 @@ static void acpi_processor_idle(void)
 		 *      qualification.  This may, however, introduce DMA
 		 *      issues (e.g. floppy DMA transfer overrun/underrun).
 		 */
-		if (pr->power.bm_activity & cx->demotion.threshold.bm) {
+		if ((pr->power.bm_activity & 0x1) &&
+		    cx->demotion.threshold.bm) {
 			local_irq_enable();
 			next_state = cx->demotion.state;
 			goto end;
@@ -322,8 +320,6 @@ static void acpi_processor_idle(void)
 	cx = &pr->power.states[ACPI_STATE_C1];
 #endif
 
-	cx->usage++;
-
 	/*
 	 * Sleep:
 	 * ------
@@ -365,7 +361,9 @@ static void acpi_processor_idle(void)
 		t1 = inl(acpi_fadt.xpm_tmr_blk.address);
 		/* Invoke C2 */
 		inb(cx->address);
-		/* Dummy op - must do something useless after P_LVL2 read */
+		/* Dummy wait op - must do something useless after P_LVL2 read
+		   because chipsets cannot guarantee that STPCLK# signal
+		   gets asserted in time to freeze execution properly. */
 		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
 		/* Get end time (ticks) */
 		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
@@ -403,7 +401,7 @@ static void acpi_processor_idle(void)
 		t1 = inl(acpi_fadt.xpm_tmr_blk.address);
 		/* Invoke C3 */
 		inb(cx->address);
-		/* Dummy op - must do something useless after P_LVL3 read */
+		/* Dummy wait op (see above) */
 		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
 		/* Get end time (ticks) */
 		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
@@ -430,6 +428,9 @@ static void acpi_processor_idle(void)
 		local_irq_enable();
 		return;
 	}
+	cx->usage++;
+	if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
+		cx->time += sleep_ticks;
 
 	next_state = pr->power.state;
 
@@ -517,10 +518,9 @@ static int acpi_processor_set_power_policy(struct acpi_processor *pr)
 	struct acpi_processor_cx *higher = NULL;
 	struct acpi_processor_cx *cx;
 
-	ACPI_FUNCTION_TRACE("acpi_processor_set_power_policy");
 
 	if (!pr)
-		return_VALUE(-EINVAL);
+		return -EINVAL;
 
 	/*
 	 * This function sets the default Cx state policy (OS idle handler).
@@ -544,7 +544,7 @@ static int acpi_processor_set_power_policy(struct acpi_processor *pr)
 	}
 
 	if (!state_is_set)
-		return_VALUE(-ENODEV);
+		return -ENODEV;
 
 	/* demotion */
 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
@@ -583,18 +583,17 @@ static int acpi_processor_set_power_policy(struct acpi_processor *pr)
 		higher = cx;
 	}
 
-	return_VALUE(0);
+	return 0;
 }
 
 static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 {
-	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_fadt");
 
 	if (!pr)
-		return_VALUE(-EINVAL);
+		return -EINVAL;
 
 	if (!pr->pblk)
-		return_VALUE(-ENODEV);
+		return -ENODEV;
 
 	/* if info is obtained from pblk/fadt, type equals state */
 	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
@@ -606,7 +605,7 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 	 * an SMP system.
 	 */
 	if ((num_online_cpus() > 1) && !acpi_fadt.plvl2_up)
-		return_VALUE(-ENODEV);
+		return -ENODEV;
 #endif
 
 	/* determine C2 and C3 address from pblk */
@@ -622,12 +621,11 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 			  pr->power.states[ACPI_STATE_C2].address,
 			  pr->power.states[ACPI_STATE_C3].address));
 
-	return_VALUE(0);
+	return 0;
 }
 
 static int acpi_processor_get_power_info_default_c1(struct acpi_processor *pr)
 {
-	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_default_c1");
 
 	/* Zero initialize all the C-states info. */
 	memset(pr->power.states, 0, sizeof(pr->power.states));
@@ -640,7 +638,7 @@ static int acpi_processor_get_power_info_default_c1(struct acpi_processor *pr)
 	pr->power.states[ACPI_STATE_C0].valid = 1;
 	pr->power.states[ACPI_STATE_C1].valid = 1;
 
-	return_VALUE(0);
+	return 0;
 }
 
 static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
@@ -652,10 +650,9 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *cst;
 
-	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_cst");
 
 	if (nocst)
-		return_VALUE(-ENODEV);
+		return -ENODEV;
 
 	current_count = 1;
 
@@ -667,15 +664,14 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
 	if (ACPI_FAILURE(status)) {
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
-		return_VALUE(-ENODEV);
+		return -ENODEV;
 	}
 
 	cst = (union acpi_object *)buffer.pointer;
 
 	/* There must be at least 2 elements */
 	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
-		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-				  "not enough elements in _CST\n"));
+		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
 		status = -EFAULT;
 		goto end;
 	}
@@ -684,8 +680,7 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 
 	/* Validate number of power states. */
 	if (count < 1 || count != cst->package.count - 1) {
-		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-				  "count given by _CST is not valid\n"));
+		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
 		status = -EFAULT;
 		goto end;
 	}
@@ -775,15 +770,14 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
       end:
 	acpi_os_free(buffer.pointer);
 
-	return_VALUE(status);
+	return status;
 }
 
 static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
 {
-	ACPI_FUNCTION_TRACE("acpi_processor_get_power_verify_c2");
 
 	if (!cx->address)
-		return_VOID;
+		return;
 
 	/*
 	 * C2 latency must be less than or equal to 100
@@ -792,7 +786,7 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
 	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 				  "latency too large [%d]\n", cx->latency));
-		return_VOID;
+		return;
 	}
 
 	/*
@@ -802,7 +796,7 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
 	cx->valid = 1;
 	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
 
-	return_VOID;
+	return;
 }
 
 static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
@@ -810,10 +804,9 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 {
 	static int bm_check_flag;
 
-	ACPI_FUNCTION_TRACE("acpi_processor_get_power_verify_c3");
 
 	if (!cx->address)
-		return_VOID;
+		return;
 
 	/*
 	 * C3 latency must be less than or equal to 1000
@@ -822,7 +815,7 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 				  "latency too large [%d]\n", cx->latency));
-		return_VOID;
+		return;
 	}
 
 	/*
@@ -835,7 +828,7 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 	else if (errata.piix4.fdma) {
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 				  "C3 not supported on PIIX4 with Type-F DMA\n"));
-		return_VOID;
+		return;
 	}
 
 	/* All the logic here assumes flags.bm_check is same across all CPUs */
@@ -852,7 +845,7 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 		if (!pr->flags.bm_control) {
 			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 					  "C3 support requires bus mastering control\n"));
-			return_VOID;
+			return;
 		}
 	} else {
 		/*
@@ -863,7 +856,7 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 					  "Cache invalidation should work properly"
 					  " for C3 to be enabled on SMP systems\n"));
-			return_VOID;
+			return;
 		}
 		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD,
 				  0, ACPI_MTX_DO_NOT_LOCK);
@@ -878,7 +871,7 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 	cx->valid = 1;
 	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
 
-	return_VOID;
+	return;
 }
 
 static int acpi_processor_power_verify(struct acpi_processor *pr)
@@ -937,7 +930,6 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
 	unsigned int i;
 	int result;
 
-	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info");
 
 	/* NOTE: the idle thread may not be running while calling
 	 * this function */
@@ -960,7 +952,7 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
 	 */
 	result = acpi_processor_set_power_policy(pr);
 	if (result)
-		return_VALUE(result);
+		return result;
 
 	/*
 	 * if one state of type C2 or C3 is available, mark this
@@ -974,24 +966,23 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
 		}
 	}
 
-	return_VALUE(0);
+	return 0;
 }
 
 int acpi_processor_cst_has_changed(struct acpi_processor *pr)
 {
 	int result = 0;
 
-	ACPI_FUNCTION_TRACE("acpi_processor_cst_has_changed");
 
 	if (!pr)
-		return_VALUE(-EINVAL);
+		return -EINVAL;
 
 	if (nocst) {
-		return_VALUE(-ENODEV);
+		return -ENODEV;
 	}
 
 	if (!pr->flags.power_setup_done)
-		return_VALUE(-ENODEV);
+		return -ENODEV;
 
 	/* Fall back to the default idle loop */
 	pm_idle = pm_idle_save;
@@ -1002,7 +993,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
 	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
 		pm_idle = acpi_processor_idle;
 
-	return_VALUE(result);
+	return result;
 }
 
 /* proc interface */
@@ -1012,7 +1003,6 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
 	struct acpi_processor *pr = (struct acpi_processor *)seq->private;
 	unsigned int i;
 
-	ACPI_FUNCTION_TRACE("acpi_processor_power_seq_show");
 
 	if (!pr)
 		goto end;
@@ -1064,13 +1054,14 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
 		else
 			seq_puts(seq, "demotion[--] ");
 
-		seq_printf(seq, "latency[%03d] usage[%08d]\n",
-			   pr->power.states[i].latency,
-			   pr->power.states[i].usage);
+		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
+			   pr->power.states[i].latency,
+			   pr->power.states[i].usage,
+			   pr->power.states[i].time);
 	}
 
       end:
-	return_VALUE(0);
+	return 0;
 }
 
 static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
@@ -1094,7 +1085,6 @@ int acpi_processor_power_init(struct acpi_processor *pr,
 	struct proc_dir_entry *entry = NULL;
 	unsigned int i;
 
-	ACPI_FUNCTION_TRACE("acpi_processor_power_init");
 
 	if (!first_run) {
 		dmi_check_system(processor_power_dmi_table);
@@ -1106,14 +1096,14 @@ int acpi_processor_power_init(struct acpi_processor *pr,
 	}
 
 	if (!pr)
-		return_VALUE(-EINVAL);
+		return -EINVAL;
 
 	if (acpi_fadt.cst_cnt && !nocst) {
 		status =
 		    acpi_os_write_port(acpi_fadt.smi_cmd, acpi_fadt.cst_cnt, 8);
 		if (ACPI_FAILURE(status)) {
-			ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-					  "Notifying BIOS of _CST ability failed\n"));
+			ACPI_EXCEPTION((AE_INFO, status,
+					"Notifying BIOS of _CST ability failed"));
 		}
 	}
 
@@ -1142,9 +1132,7 @@ int acpi_processor_power_init(struct acpi_processor *pr,
 	entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
 				  S_IRUGO, acpi_device_dir(device));
 	if (!entry)
-		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-				  "Unable to create '%s' fs entry\n",
-				  ACPI_PROCESSOR_FILE_POWER));
+		return -EIO;
 	else {
 		entry->proc_fops = &acpi_processor_power_fops;
 		entry->data = acpi_driver_data(device);
@@ -1153,13 +1141,12 @@ int acpi_processor_power_init(struct acpi_processor *pr,
 
 	pr->flags.power_setup_done = 1;
 
-	return_VALUE(0);
+	return 0;
 }
 
 int acpi_processor_power_exit(struct acpi_processor *pr,
 			      struct acpi_device *device)
 {
-	ACPI_FUNCTION_TRACE("acpi_processor_power_exit");
 
 	pr->flags.power_setup_done = 0;
 
@@ -1179,5 +1166,5 @@ int acpi_processor_power_exit(struct acpi_processor *pr,
 		cpu_idle_wait();
 	}
 
-	return_VALUE(0);
+	return 0;
 }