Diffstat (limited to 'drivers/acpi/processor_idle.c')

 -rw-r--r--   drivers/acpi/processor_idle.c | 138
 1 file changed, 97 insertions(+), 41 deletions(-)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index c9d671cf7857..893b074e3d1a 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -6,6 +6,8 @@
  * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
  * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  *  - Added processor hotplug support
+ * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ *  - Added support for C3 on SMP
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
@@ -142,7 +144,7 @@ acpi_processor_power_activate (
 	switch (old->type) {
 	case ACPI_STATE_C3:
 		/* Disable bus master reload */
-		if (new->type != ACPI_STATE_C3)
+		if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
 			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0, ACPI_MTX_DO_NOT_LOCK);
 		break;
 	}
@@ -152,7 +154,7 @@ acpi_processor_power_activate (
 	switch (new->type) {
 	case ACPI_STATE_C3:
 		/* Enable bus master reload */
-		if (old->type != ACPI_STATE_C3)
+		if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
 			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1, ACPI_MTX_DO_NOT_LOCK);
 		break;
 	}
@@ -163,6 +165,9 @@ acpi_processor_power_activate (
 }
 
 
+static atomic_t c3_cpu_count;
+
+
 static void acpi_processor_idle (void)
 {
 	struct acpi_processor *pr = NULL;
@@ -297,8 +302,22 @@ static void acpi_processor_idle (void)
 		break;
 
 	case ACPI_STATE_C3:
-		/* Disable bus master arbitration */
-		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1, ACPI_MTX_DO_NOT_LOCK);
+
+		if (pr->flags.bm_check) {
+			if (atomic_inc_return(&c3_cpu_count) ==
+					num_online_cpus()) {
+				/*
+				 * All CPUs are trying to go to C3
+				 * Disable bus master arbitration
+				 */
+				acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1,
+					ACPI_MTX_DO_NOT_LOCK);
+			}
+		} else {
+			/* SMP with no shared cache... Invalidate cache */
+			ACPI_FLUSH_CPU_CACHE();
+		}
+
 		/* Get start time (ticks) */
 		t1 = inl(acpi_fadt.xpm_tmr_blk.address);
 		/* Invoke C3 */
@@ -307,8 +326,12 @@ static void acpi_processor_idle (void)
 		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
 		/* Get end time (ticks) */
 		t2 = inl(acpi_fadt.xpm_tmr_blk.address);
-		/* Enable bus master arbitration */
-		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0, ACPI_MTX_DO_NOT_LOCK);
+		if (pr->flags.bm_check) {
+			/* Enable bus master arbitration */
+			atomic_dec(&c3_cpu_count);
+			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0, ACPI_MTX_DO_NOT_LOCK);
+		}
+
 		/* Re-enable interrupts */
 		local_irq_enable();
 		/* Compute time (ticks) that we were actually asleep */
@@ -519,6 +542,29 @@ static int acpi_processor_get_power_info_fadt (struct acpi_processor *pr)
 }
 
 
+static int acpi_processor_get_power_info_default_c1 (struct acpi_processor *pr)
+{
+	int i;
+
+	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_default_c1");
+
+	for (i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++)
+		memset(pr->power.states, 0, sizeof(struct acpi_processor_cx));
+
+	/* if info is obtained from pblk/fadt, type equals state */
+	pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
+	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
+	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;
+
+	/* the C0 state only exists as a filler in our array,
+	 * and all processors need to support C1 */
+	pr->power.states[ACPI_STATE_C0].valid = 1;
+	pr->power.states[ACPI_STATE_C1].valid = 1;
+
+	return_VALUE(0);
+}
+
+
 static int acpi_processor_get_power_info_cst (struct acpi_processor *pr)
 {
 	acpi_status status = 0;
@@ -529,9 +575,6 @@ static int acpi_processor_get_power_info_cst (struct acpi_processor *pr)
 
 	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_cst");
 
-	if (errata.smp)
-		return_VALUE(-ENODEV);
-
 	if (nocst)
 		return_VALUE(-ENODEV);
 
@@ -664,13 +707,6 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
 		return_VOID;
 	}
 
-	/* We're (currently) only supporting C2 on UP */
-	else if (errata.smp) {
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			"C2 not supported in SMP mode\n"));
-		return_VOID;
-	}
-
 	/*
 	 * Otherwise we've met all of our C2 requirements.
 	 * Normalize the C2 latency to expidite policy
@@ -686,6 +722,8 @@ static void acpi_processor_power_verify_c3(
 	struct acpi_processor *pr,
 	struct acpi_processor_cx *cx)
 {
+	static int bm_check_flag;
+
 	ACPI_FUNCTION_TRACE("acpi_processor_get_power_verify_c3");
 
 	if (!cx->address)
@@ -702,20 +740,6 @@ static void acpi_processor_power_verify_c3(
 		return_VOID;
 	}
 
-	/* bus mastering control is necessary */
-	else if (!pr->flags.bm_control) {
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			"C3 support requires bus mastering control\n"));
-		return_VOID;
-	}
-
-	/* We're (currently) only supporting C2 on UP */
-	else if (errata.smp) {
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			"C3 not supported in SMP mode\n"));
-		return_VOID;
-	}
-
 	/*
 	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
 	 * DMA transfers are used by any ISA device to avoid livelock.
@@ -729,6 +753,39 @@ static void acpi_processor_power_verify_c3(
 		return_VOID;
 	}
 
+	/* All the logic here assumes flags.bm_check is same across all CPUs */
+	if (!bm_check_flag) {
+		/* Determine whether bm_check is needed based on CPU */
+		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
+		bm_check_flag = pr->flags.bm_check;
+	} else {
+		pr->flags.bm_check = bm_check_flag;
+	}
+
+	if (pr->flags.bm_check) {
+		printk("Disabling BM access before entering C3\n");
+		/* bus mastering control is necessary */
+		if (!pr->flags.bm_control) {
+			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+				"C3 support requires bus mastering control\n"));
+			return_VOID;
+		}
+	} else {
+		printk("Invalidating cache before entering C3\n");
+		/*
+		 * WBINVD should be set in fadt, for C3 state to be
+		 * supported on when bm_check is not required.
+		 */
+		if (acpi_fadt.wb_invd != 1) {
+			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+				"Cache invalidation should work properly"
+				" for C3 to be enabled on SMP systems\n"));
+			return_VOID;
+		}
+		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD,
+			0, ACPI_MTX_DO_NOT_LOCK);
+	}
+
 	/*
 	 * Otherwise we've met all of our C3 requirements.
 	 * Normalize the C3 latency to expidite policy. Enable
@@ -737,7 +794,6 @@ static void acpi_processor_power_verify_c3(
 	 */
 	cx->valid = 1;
 	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
-	pr->flags.bm_check = 1;
 
 	return_VOID;
 }
@@ -787,10 +843,7 @@ static int acpi_processor_get_power_info (
 	if ((result) || (acpi_processor_power_verify(pr) < 2)) {
 		result = acpi_processor_get_power_info_fadt(pr);
 		if (result)
-			return_VALUE(result);
-
-		if (acpi_processor_power_verify(pr) < 2)
-			return_VALUE(-ENODEV);
+			result = acpi_processor_get_power_info_default_c1(pr);
 	}
 
 	/*
@@ -810,11 +863,10 @@ static int acpi_processor_get_power_info (
 	 *       CPU as being "idle manageable"
 	 */
 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
-		if (pr->power.states[i].valid)
+		if (pr->power.states[i].valid) {
 			pr->power.count = i;
-		if ((pr->power.states[i].valid) &&
-		    (pr->power.states[i].type >= ACPI_STATE_C2))
 			pr->flags.power = 1;
+		}
 	}
 
 	return_VALUE(0);
@@ -829,7 +881,7 @@ int acpi_processor_cst_has_changed (struct acpi_processor *pr)
 	if (!pr)
 		return_VALUE(-EINVAL);
 
-	if (errata.smp || nocst) {
+	if ( nocst) {
 		return_VALUE(-ENODEV);
 	}
 
@@ -929,7 +981,6 @@ static struct file_operations acpi_processor_power_fops = {
 	.release	= single_release,
 };
 
-
 int acpi_processor_power_init(struct acpi_processor *pr, struct acpi_device *device)
 {
 	acpi_status status = 0;
@@ -946,7 +997,10 @@ int acpi_processor_power_init(struct acpi_processor *pr, struct acpi_device *dev
 		first_run++;
 	}
 
-	if (!errata.smp && (pr->id == 0) && acpi_fadt.cst_cnt && !nocst) {
+	if (!pr)
+		return_VALUE(-EINVAL);
+
+	if (acpi_fadt.cst_cnt && !nocst) {
 		status = acpi_os_write_port(acpi_fadt.smi_cmd, acpi_fadt.cst_cnt, 8);
 		if (ACPI_FAILURE(status)) {
 			ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
@@ -954,6 +1008,8 @@ int acpi_processor_power_init(struct acpi_processor *pr, struct acpi_device *dev
 		}
 	}
 
+	acpi_processor_power_init_pdc(&(pr->power), pr->id);
+	acpi_processor_set_pdc(pr, pr->power.pdc);
 	acpi_processor_get_power_info(pr);
 
 	/*