Diffstat (limited to 'drivers/acpi/processor_idle.c')
-rw-r--r--	drivers/acpi/processor_idle.c	| 154
1 file changed, 75 insertions, 79 deletions
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 3b97a5eae9e..71066066d62 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
  * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
- * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
+ * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
  * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  *  - Added processor hotplug support
  * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
@@ -99,6 +99,9 @@ static int set_max_cstate(struct dmi_system_id *id)
 static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
 	{ set_max_cstate, "IBM ThinkPad R40e", {
 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
+	  DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1},
+	{ set_max_cstate, "IBM ThinkPad R40e", {
+	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
 	{ set_max_cstate, "IBM ThinkPad R40e", {
 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
@@ -206,11 +209,11 @@ acpi_processor_power_activate(struct acpi_processor *pr,
 
 static void acpi_safe_halt(void)
 {
-	clear_thread_flag(TIF_POLLING_NRFLAG);
+	current_thread_info()->status &= ~TS_POLLING;
 	smp_mb__after_clear_bit();
 	if (!need_resched())
 		safe_halt();
-	set_thread_flag(TIF_POLLING_NRFLAG);
+	current_thread_info()->status |= TS_POLLING;
 }
 
 static atomic_t c3_cpu_count;
@@ -261,21 +264,15 @@ static void acpi_processor_idle(void)
 		u32 bm_status = 0;
 		unsigned long diff = jiffies - pr->power.bm_check_timestamp;
 
-		if (diff > 32)
-			diff = 32;
+		if (diff > 31)
+			diff = 31;
 
-		while (diff) {
-			/* if we didn't get called, assume there was busmaster activity */
-			diff--;
-			if (diff)
-				pr->power.bm_activity |= 0x1;
-			pr->power.bm_activity <<= 1;
-		}
+		pr->power.bm_activity <<= diff;
 
 		acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS,
 				  &bm_status, ACPI_MTX_DO_NOT_LOCK);
 		if (bm_status) {
-			pr->power.bm_activity++;
+			pr->power.bm_activity |= 0x1;
 			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS,
 					  1, ACPI_MTX_DO_NOT_LOCK);
 		}
@@ -287,16 +284,16 @@ static void acpi_processor_idle(void)
 	else if (errata.piix4.bmisx) {
 		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
 		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
-			pr->power.bm_activity++;
+			pr->power.bm_activity |= 0x1;
 	}
 
 	pr->power.bm_check_timestamp = jiffies;
 
 	/*
-	 * Apply bus mastering demotion policy. Automatically demote
+	 * If bus mastering is or was active this jiffy, demote
 	 * to avoid a faulty transition.  Note that the processor
 	 * won't enter a low-power state during this call (to this
-	 * funciton) but should upon the next.
+	 * function) but should upon the next.
 	 *
 	 * TBD: A better policy might be to fallback to the demotion
 	 *      state (use it for this quantum only) istead of
@@ -304,7 +301,8 @@ static void acpi_processor_idle(void)
 	 *      qualification. This may, however, introduce DMA
 	 *      issues (e.g. floppy DMA transfer overrun/underrun).
 	 */
-	if (pr->power.bm_activity & cx->demotion.threshold.bm) {
+	if ((pr->power.bm_activity & 0x1) &&
+	    cx->demotion.threshold.bm) {
 		local_irq_enable();
 		next_state = cx->demotion.state;
 		goto end;
@@ -322,18 +320,16 @@ static void acpi_processor_idle(void)
 		cx = &pr->power.states[ACPI_STATE_C1];
 #endif
 
-	cx->usage++;
-
 	/*
 	 * Sleep:
 	 * ------
 	 * Invoke the current Cx state to put the processor to sleep.
 	 */
 	if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
-		clear_thread_flag(TIF_POLLING_NRFLAG);
+		current_thread_info()->status &= ~TS_POLLING;
 		smp_mb__after_clear_bit();
 		if (need_resched()) {
-			set_thread_flag(TIF_POLLING_NRFLAG);
+			current_thread_info()->status |= TS_POLLING;
 			local_irq_enable();
 			return;
 		}
@@ -365,13 +361,20 @@ static void acpi_processor_idle(void)
 		t1 = inl(acpi_fadt.xpm_tmr_blk.address);
 		/* Invoke C2 */
 		inb(cx->address);
-		/* Dummy op - must do something useless after P_LVL2 read */
+		/* Dummy wait op - must do something useless after P_LVL2 read
+		   because chipsets cannot guarantee that STPCLK# signal
+		   gets asserted in time to freeze execution properly. */
 		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
 		/* Get end time (ticks) */
 		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
+
+#ifdef CONFIG_GENERIC_TIME
+		/* TSC halts in C2, so notify users */
+		mark_tsc_unstable();
+#endif
 		/* Re-enable interrupts */
 		local_irq_enable();
-		set_thread_flag(TIF_POLLING_NRFLAG);
+		current_thread_info()->status |= TS_POLLING;
 		/* Compute time (ticks) that we were actually asleep */
 		sleep_ticks =
 		    ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD;
@@ -398,7 +401,7 @@ static void acpi_processor_idle(void)
 		t1 = inl(acpi_fadt.xpm_tmr_blk.address);
 		/* Invoke C3 */
 		inb(cx->address);
-		/* Dummy op - must do something useless after P_LVL3 read */
+		/* Dummy wait op (see above) */
 		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
 		/* Get end time (ticks) */
 		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
@@ -409,9 +412,13 @@ static void acpi_processor_idle(void)
 					  ACPI_MTX_DO_NOT_LOCK);
 		}
 
+#ifdef CONFIG_GENERIC_TIME
+		/* TSC halts in C3, so notify users */
+		mark_tsc_unstable();
+#endif
 		/* Re-enable interrupts */
 		local_irq_enable();
-		set_thread_flag(TIF_POLLING_NRFLAG);
+		current_thread_info()->status |= TS_POLLING;
 		/* Compute time (ticks) that we were actually asleep */
 		sleep_ticks =
 		    ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD;
@@ -421,6 +428,9 @@ static void acpi_processor_idle(void)
 		local_irq_enable();
 		return;
 	}
+	cx->usage++;
+	if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
+		cx->time += sleep_ticks;
 
 	next_state = pr->power.state;
 
@@ -508,10 +518,9 @@ static int acpi_processor_set_power_policy(struct acpi_processor *pr)
 	struct acpi_processor_cx *higher = NULL;
 	struct acpi_processor_cx *cx;
 
-	ACPI_FUNCTION_TRACE("acpi_processor_set_power_policy");
 
 	if (!pr)
-		return_VALUE(-EINVAL);
+		return -EINVAL;
 
 	/*
 	 * This function sets the default Cx state policy (OS idle handler).
@@ -535,7 +544,7 @@ static int acpi_processor_set_power_policy(struct acpi_processor *pr)
 	}
 
 	if (!state_is_set)
-		return_VALUE(-ENODEV);
+		return -ENODEV;
 
 	/* demotion */
 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
@@ -574,18 +583,17 @@ static int acpi_processor_set_power_policy(struct acpi_processor *pr)
 		higher = cx;
 	}
 
-	return_VALUE(0);
+	return 0;
 }
 
 static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 {
-	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_fadt");
 
 	if (!pr)
-		return_VALUE(-EINVAL);
+		return -EINVAL;
 
 	if (!pr->pblk)
-		return_VALUE(-ENODEV);
+		return -ENODEV;
 
 	/* if info is obtained from pblk/fadt, type equals state */
 	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
@@ -597,7 +605,7 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 	 * an SMP system.
 	 */
 	if ((num_online_cpus() > 1) && !acpi_fadt.plvl2_up)
-		return_VALUE(-ENODEV);
+		return -ENODEV;
 #endif
 
 	/* determine C2 and C3 address from pblk */
@@ -613,12 +621,11 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 			  pr->power.states[ACPI_STATE_C2].address,
 			  pr->power.states[ACPI_STATE_C3].address));
 
-	return_VALUE(0);
+	return 0;
 }
 
 static int acpi_processor_get_power_info_default_c1(struct acpi_processor *pr)
 {
-	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_default_c1");
 
 	/* Zero initialize all the C-states info. */
 	memset(pr->power.states, 0, sizeof(pr->power.states));
@@ -631,7 +638,7 @@ static int acpi_processor_get_power_info_default_c1(struct acpi_processor *pr)
 	pr->power.states[ACPI_STATE_C0].valid = 1;
 	pr->power.states[ACPI_STATE_C1].valid = 1;
 
-	return_VALUE(0);
+	return 0;
 }
 
 static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
@@ -643,10 +650,9 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *cst;
 
-	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_cst");
 
 	if (nocst)
-		return_VALUE(-ENODEV);
+		return -ENODEV;
 
 	current_count = 1;
 
@@ -658,15 +664,14 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
 	if (ACPI_FAILURE(status)) {
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
-		return_VALUE(-ENODEV);
+		return -ENODEV;
 	}
 
 	cst = (union acpi_object *)buffer.pointer;
 
 	/* There must be at least 2 elements */
 	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
-		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-				  "not enough elements in _CST\n"));
+		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
 		status = -EFAULT;
 		goto end;
 	}
@@ -675,8 +680,7 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 
 	/* Validate number of power states. */
 	if (count < 1 || count != cst->package.count - 1) {
-		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-				  "count given by _CST is not valid\n"));
+		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
 		status = -EFAULT;
 		goto end;
 	}
@@ -764,17 +768,16 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 		status = -EFAULT;
 
       end:
-	acpi_os_free(buffer.pointer);
+	kfree(buffer.pointer);
 
-	return_VALUE(status);
+	return status;
 }
 
 static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
 {
-	ACPI_FUNCTION_TRACE("acpi_processor_get_power_verify_c2");
 
 	if (!cx->address)
-		return_VOID;
+		return;
 
 	/*
 	 * C2 latency must be less than or equal to 100
@@ -783,7 +786,7 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
 	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 				  "latency too large [%d]\n", cx->latency));
-		return_VOID;
+		return;
 	}
 
 	/*
@@ -793,7 +796,7 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
 	cx->valid = 1;
 	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
 
-	return_VOID;
+	return;
 }
 
 static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
@@ -801,10 +804,9 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 {
 	static int bm_check_flag;
 
-	ACPI_FUNCTION_TRACE("acpi_processor_get_power_verify_c3");
 
 	if (!cx->address)
-		return_VOID;
+		return;
 
 	/*
 	 * C3 latency must be less than or equal to 1000
@@ -813,7 +815,7 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 				  "latency too large [%d]\n", cx->latency));
-		return_VOID;
+		return;
 	}
 
 	/*
@@ -826,7 +828,7 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 	else if (errata.piix4.fdma) {
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 				  "C3 not supported on PIIX4 with Type-F DMA\n"));
-		return_VOID;
+		return;
 	}
 
 	/* All the logic here assumes flags.bm_check is same across all CPUs */
@@ -843,7 +845,7 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 		if (!pr->flags.bm_control) {
 			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 					  "C3 support requires bus mastering control\n"));
-			return_VOID;
+			return;
 		}
 	} else {
 		/*
@@ -854,7 +856,7 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 					  "Cache invalidation should work properly"
 					  " for C3 to be enabled on SMP systems\n"));
-			return_VOID;
+			return;
 		}
 		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD,
 				  0, ACPI_MTX_DO_NOT_LOCK);
@@ -869,7 +871,7 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 	cx->valid = 1;
 	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
 
-	return_VOID;
+	return;
 }
 
 static int acpi_processor_power_verify(struct acpi_processor *pr)
@@ -928,7 +930,6 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
 	unsigned int i;
 	int result;
 
-	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info");
 
 	/* NOTE: the idle thread may not be running while calling
 	 * this function */
@@ -951,7 +952,7 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
 	 */
 	result = acpi_processor_set_power_policy(pr);
 	if (result)
-		return_VALUE(result);
+		return result;
 
 	/*
 	 * if one state of type C2 or C3 is available, mark this
@@ -965,24 +966,23 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
 		}
 	}
 
-	return_VALUE(0);
+	return 0;
 }
 
 int acpi_processor_cst_has_changed(struct acpi_processor *pr)
 {
 	int result = 0;
 
-	ACPI_FUNCTION_TRACE("acpi_processor_cst_has_changed");
 
 	if (!pr)
-		return_VALUE(-EINVAL);
+		return -EINVAL;
 
 	if (nocst) {
-		return_VALUE(-ENODEV);
+		return -ENODEV;
 	}
 
 	if (!pr->flags.power_setup_done)
-		return_VALUE(-ENODEV);
+		return -ENODEV;
 
 	/* Fall back to the default idle loop */
 	pm_idle = pm_idle_save;
@@ -993,7 +993,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
 	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
 		pm_idle = acpi_processor_idle;
 
-	return_VALUE(result);
+	return result;
 }
 
 /* proc interface */
@@ -1003,7 +1003,6 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
 	struct acpi_processor *pr = (struct acpi_processor *)seq->private;
 	unsigned int i;
 
-	ACPI_FUNCTION_TRACE("acpi_processor_power_seq_show");
 
 	if (!pr)
 		goto end;
@@ -1055,13 +1054,14 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
 		else
 			seq_puts(seq, "demotion[--] ");
 
-		seq_printf(seq, "latency[%03d] usage[%08d]\n",
+		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
 			   pr->power.states[i].latency,
-			   pr->power.states[i].usage);
+			   pr->power.states[i].usage,
+			   pr->power.states[i].time);
 	}
 
       end:
-	return_VALUE(0);
+	return 0;
 }
 
 static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
@@ -1070,7 +1070,7 @@ static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
 			   PDE(inode)->data);
 }
 
-static struct file_operations acpi_processor_power_fops = {
+static const struct file_operations acpi_processor_power_fops = {
 	.open = acpi_processor_power_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -1085,7 +1085,6 @@ int acpi_processor_power_init(struct acpi_processor *pr,
 	struct proc_dir_entry *entry = NULL;
 	unsigned int i;
 
-	ACPI_FUNCTION_TRACE("acpi_processor_power_init");
 
 	if (!first_run) {
 		dmi_check_system(processor_power_dmi_table);
@@ -1097,14 +1096,14 @@ int acpi_processor_power_init(struct acpi_processor *pr,
 	}
 
 	if (!pr)
-		return_VALUE(-EINVAL);
+		return -EINVAL;
 
 	if (acpi_fadt.cst_cnt && !nocst) {
 		status =
 		    acpi_os_write_port(acpi_fadt.smi_cmd, acpi_fadt.cst_cnt, 8);
 		if (ACPI_FAILURE(status)) {
-			ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-					  "Notifying BIOS of _CST ability failed\n"));
+			ACPI_EXCEPTION((AE_INFO, status,
+					"Notifying BIOS of _CST ability failed"));
 		}
 	}
 
@@ -1133,9 +1132,7 @@ int acpi_processor_power_init(struct acpi_processor *pr,
 	entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
 				  S_IRUGO, acpi_device_dir(device));
 	if (!entry)
-		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-				  "Unable to create '%s' fs entry\n",
-				  ACPI_PROCESSOR_FILE_POWER));
+		return -EIO;
 	else {
 		entry->proc_fops = &acpi_processor_power_fops;
 		entry->data = acpi_driver_data(device);
@@ -1144,13 +1141,12 @@ int acpi_processor_power_init(struct acpi_processor *pr,
 
 	pr->flags.power_setup_done = 1;
 
-	return_VALUE(0);
+	return 0;
 }
 
 int acpi_processor_power_exit(struct acpi_processor *pr,
 			      struct acpi_device *device)
 {
-	ACPI_FUNCTION_TRACE("acpi_processor_power_exit");
 
 	pr->flags.power_setup_done = 0;
 
@@ -1170,5 +1166,5 @@ int acpi_processor_power_exit(struct acpi_processor *pr,
 		cpu_idle_wait();
 	}
 
-	return_VALUE(0);
+	return 0;
 }