 drivers/acpi/processor_idle.c | 182 +++++++++---------------------
 1 file changed, 52 insertions(+), 130 deletions(-)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 87b704e41877..c256bd7fbd78 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -681,15 +681,13 @@ static int acpi_idle_bm_check(void)
 }
 
 /**
- * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
+ * acpi_idle_do_entry - enter idle state using the appropriate method
  * @cx: cstate data
  *
  * Caller disables interrupt before call and enables interrupt after return.
  */
-static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
+static void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 {
-	/* Don't trace irqs off for idle */
-	stop_critical_timings();
 	if (cx->entry_method == ACPI_CSTATE_FFH) {
 		/* Call into architectural FFH based C-state */
 		acpi_processor_ffh_cstate_enter(cx);
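
Aside, not part of the patch: the helper renamed above dispatches on the C-state's entry method, and only its FFH branch is visible in this hunk. A minimal, compilable sketch of the full dispatch, with stand-in names suffixed _sketch; in the real code the branches call acpi_processor_ffh_cstate_enter(), acpi_safe_halt(), and inb() on the state's P_LVLx port followed by a dummy inl() of the PM timer.

/* Stand-in model of acpi_idle_do_entry()'s three entry methods. */
#include <stdio.h>

enum entry_method_sketch { FFH_SKETCH, HALT_SKETCH, SYSTEMIO_SKETCH };

static void idle_do_entry_sketch(enum entry_method_sketch m)
{
	switch (m) {
	case FFH_SKETCH:	/* architectural entry, e.g. MWAIT on x86 */
		puts("FFH entry");
		break;
	case HALT_SKETCH:	/* HLT; any interrupt wakes the CPU */
		puts("HALT entry");
		break;
	case SYSTEMIO_SKETCH:	/* I/O read enters the state; the dummy
				 * PM-timer read gives STPCLK# time to
				 * assert before execution continues */
		puts("SYSTEM_IO entry");
		break;
	}
}

int main(void)
{
	idle_do_entry_sketch(SYSTEMIO_SKETCH);
	return 0;
}
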
@@ -703,38 +701,9 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 		   gets asserted in time to freeze execution properly. */
 		inl(acpi_gbl_FADT.xpm_timer_block.address);
 	}
-	start_critical_timings();
 }
 
 /**
- * acpi_idle_enter_c1 - enters an ACPI C1 state-type
- * @dev: the target CPU
- * @drv: cpuidle driver containing cpuidle state info
- * @index: index of target state
- *
- * This is equivalent to the HALT instruction.
- */
-static int acpi_idle_enter_c1(struct cpuidle_device *dev,
-		struct cpuidle_driver *drv, int index)
-{
-	struct acpi_processor *pr;
-	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
-
-	pr = __this_cpu_read(processors);
-
-	if (unlikely(!pr))
-		return -EINVAL;
-
-	lapic_timer_state_broadcast(pr, cx, 1);
-	acpi_idle_do_entry(cx);
-
-	lapic_timer_state_broadcast(pr, cx, 0);
-
-	return index;
-}
-
-
-/**
  * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
  * @dev: the target CPU
  * @index: the index of suggested state
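
The kernel-doc deleted above noted that C1 entry "is equivalent to the HALT instruction"; that behavior is not lost, it now simply flows through acpi_idle_do_entry()'s HALT branch. For illustration, a sketch of what a "safe" halt means on x86 (compiles with GCC/Clang on x86 only; HLT is privileged, so this is readable rather than runnable at user level):

/* STI; HLT back to back: STI's one-instruction interrupt shadow means
 * nothing can fire between enabling interrupts and halting, so a wakeup
 * interrupt cannot be lost. */
static inline void safe_halt_sketch(void)
{
	__asm__ volatile("sti; hlt" : : : "memory");
}

int main(void)
{
	return 0;	/* safe_halt_sketch() deliberately left uncalled */
}
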
@@ -761,47 +730,11 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
 	return 0;
 }
 
-/**
- * acpi_idle_enter_simple - enters an ACPI state without BM handling
- * @dev: the target CPU
- * @drv: cpuidle driver with cpuidle state information
- * @index: the index of suggested state
- */
-static int acpi_idle_enter_simple(struct cpuidle_device *dev,
-		struct cpuidle_driver *drv, int index)
+static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
 {
-	struct acpi_processor *pr;
-	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
-
-	pr = __this_cpu_read(processors);
-
-	if (unlikely(!pr))
-		return -EINVAL;
-
-#ifdef CONFIG_HOTPLUG_CPU
-	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
-	    !pr->flags.has_cst &&
-	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
-		return acpi_idle_enter_c1(dev, drv, CPUIDLE_DRIVER_STATE_START);
-#endif
-
-	/*
-	 * Must be done before busmaster disable as we might need to
-	 * access HPET !
-	 */
-	lapic_timer_state_broadcast(pr, cx, 1);
-
-	if (cx->type == ACPI_STATE_C3)
-		ACPI_FLUSH_CPU_CACHE();
-
-	/* Tell the scheduler that we are going deep-idle: */
-	sched_clock_idle_sleep_event();
-	acpi_idle_do_entry(cx);
-
-	sched_clock_idle_wakeup_event(0);
-
-	lapic_timer_state_broadcast(pr, cx, 0);
-	return index;
+	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && num_online_cpus() > 1 &&
+		!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED) &&
+		!pr->flags.has_cst;
 }
 
 static int c3_cpu_count;
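
The new helper is where the patch folds two identical #ifdef CONFIG_HOTPLUG_CPU blocks into one plain C expression; the cx->type != ACPI_STATE_C1 test moves to the caller. IS_ENABLED() is what makes this idiomatic: it expands to a constant 1 or 0, so the disabled branch is still parsed and type-checked before the compiler folds it away. Below, a self-contained re-creation of the trick, simplified from include/linux/kconfig.h (the real macro also covers CONFIG_FOO=m module options):

/* Enabled CONFIG_* symbols are defined to 1; the placeholder trick turns
 * "defined to 1" into a constant the compiler can fold. */
#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_ENABLED(option) __is_defined(option)

#define CONFIG_HOTPLUG_CPU 1	/* comment out and the result becomes 0 */

int main(void)
{
	/* A constant 1 or 0: the dead branch is type-checked, then
	 * folded away -- unlike code hidden behind #ifdef. */
	printf("IS_ENABLED(CONFIG_HOTPLUG_CPU) = %d\n",
	       IS_ENABLED(CONFIG_HOTPLUG_CPU));
	return 0;
}
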
@@ -809,44 +742,14 @@ static DEFINE_RAW_SPINLOCK(c3_lock);
 
 /**
  * acpi_idle_enter_bm - enters C3 with proper BM handling
- * @dev: the target CPU
- * @drv: cpuidle driver containing state data
- * @index: the index of suggested state
- *
- * If BM is detected, the deepest non-C3 idle state is entered instead.
+ * @pr: Target processor
+ * @cx: Target state context
  */
-static int acpi_idle_enter_bm(struct cpuidle_device *dev,
-		struct cpuidle_driver *drv, int index)
+static void acpi_idle_enter_bm(struct acpi_processor *pr,
+			       struct acpi_processor_cx *cx)
 {
-	struct acpi_processor *pr;
-	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
-
-	pr = __this_cpu_read(processors);
-
-	if (unlikely(!pr))
-		return -EINVAL;
-
-#ifdef CONFIG_HOTPLUG_CPU
-	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
-	    !pr->flags.has_cst &&
-	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
-		return acpi_idle_enter_c1(dev, drv, CPUIDLE_DRIVER_STATE_START);
-#endif
-
-	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
-		if (drv->safe_state_index >= 0) {
-			return drv->states[drv->safe_state_index].enter(dev,
-						drv, drv->safe_state_index);
-		} else {
-			acpi_safe_halt();
-			return -EBUSY;
-		}
-	}
-
 	acpi_unlazy_tlb(smp_processor_id());
 
-	/* Tell the scheduler that we are going deep-idle: */
-	sched_clock_idle_sleep_event();
 	/*
 	 * Must be done before busmaster disable as we might need to
 	 * access HPET !
@@ -856,37 +759,71 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	/*
 	 * disable bus master
 	 * bm_check implies we need ARB_DIS
-	 * !bm_check implies we need cache flush
 	 * bm_control implies whether we can do ARB_DIS
 	 *
 	 * That leaves a case where bm_check is set and bm_control is
 	 * not set. In that case we cannot do much, we enter C3
 	 * without doing anything.
 	 */
-	if (pr->flags.bm_check && pr->flags.bm_control) {
+	if (pr->flags.bm_control) {
 		raw_spin_lock(&c3_lock);
 		c3_cpu_count++;
 		/* Disable bus master arbitration when all CPUs are in C3 */
 		if (c3_cpu_count == num_online_cpus())
 			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
 		raw_spin_unlock(&c3_lock);
-	} else if (!pr->flags.bm_check) {
-		ACPI_FLUSH_CPU_CACHE();
 	}
 
 	acpi_idle_do_entry(cx);
 
 	/* Re-enable bus master arbitration */
-	if (pr->flags.bm_check && pr->flags.bm_control) {
+	if (pr->flags.bm_control) {
 		raw_spin_lock(&c3_lock);
 		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
 		c3_cpu_count--;
 		raw_spin_unlock(&c3_lock);
 	}
 
-	sched_clock_idle_wakeup_event(0);
+	lapic_timer_state_broadcast(pr, cx, 0);
+}
+
+static int acpi_idle_enter(struct cpuidle_device *dev,
+			   struct cpuidle_driver *drv, int index)
+{
+	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
+	struct acpi_processor *pr;
+
+	pr = __this_cpu_read(processors);
+	if (unlikely(!pr))
+		return -EINVAL;
+
+	if (cx->type != ACPI_STATE_C1) {
+		if (acpi_idle_fallback_to_c1(pr)) {
+			index = CPUIDLE_DRIVER_STATE_START;
+			cx = per_cpu(acpi_cstate[index], dev->cpu);
+		} else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
+			if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
+				acpi_idle_enter_bm(pr, cx);
+				return index;
+			} else if (drv->safe_state_index >= 0) {
+				index = drv->safe_state_index;
+				cx = per_cpu(acpi_cstate[index], dev->cpu);
+			} else {
+				acpi_safe_halt();
+				return -EBUSY;
+			}
+		}
+	}
+
+	lapic_timer_state_broadcast(pr, cx, 1);
+
+	if (cx->type == ACPI_STATE_C3)
+		ACPI_FLUSH_CPU_CACHE();
+
+	acpi_idle_do_entry(cx);
 
 	lapic_timer_state_broadcast(pr, cx, 0);
+
 	return index;
 }
 
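Two points worth pulling out of this hunk. First, the bus-master protocol is intact: the last CPU entering C3 disables bus-master arbitration, and any CPU leaving re-enables it. A compilable recast of that pattern in plain pthreads, where arb_disable()/arb_enable() stand in for acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1/0) and num_cpus for num_online_cpus():

/* Recast of the c3_cpu_count protocol so it can be read in isolation. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t c3_lock = PTHREAD_MUTEX_INITIALIZER;
static int c3_cpu_count;
static const int num_cpus = 4;	/* stands in for num_online_cpus() */

static void arb_disable(void) { puts("ARB_DIS=1: DMA parked"); }
static void arb_enable(void)  { puts("ARB_DIS=0: DMA flowing"); }

static void enter_c3(void)
{
	pthread_mutex_lock(&c3_lock);
	/* Disable arbitration only when every CPU is in C3; a single
	 * busy CPU keeps DMA snooped for all of them. */
	if (++c3_cpu_count == num_cpus)
		arb_disable();
	pthread_mutex_unlock(&c3_lock);

	/* ...the hardware C3 entry would happen here... */

	pthread_mutex_lock(&c3_lock);
	arb_enable();		/* any CPU waking re-enables arbitration */
	c3_cpu_count--;
	pthread_mutex_unlock(&c3_lock);
}

int main(void)
{
	enter_c3();	/* one pass: counter never hits num_cpus, so only
			 * the unconditional re-enable path prints */
	return 0;
}

Second, acpi_idle_enter() now holds the entire former decision tree in one place: C1 requests enter directly; deeper requests are demoted to C1 on SMP systems lacking _CST and ACPI_FADT_C2_MP_SUPPORTED, routed through acpi_idle_enter_bm() when bus-master activity permits, demoted to the driver's safe state when it does not, or failed with -EBUSY after a plain halt as the last resort.
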
@@ -981,27 +918,12 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
 		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
 		state->exit_latency = cx->latency;
 		state->target_residency = cx->latency * latency_factor;
+		state->enter = acpi_idle_enter;
 
 		state->flags = 0;
-		switch (cx->type) {
-		case ACPI_STATE_C1:
-
-			state->enter = acpi_idle_enter_c1;
-			state->enter_dead = acpi_idle_play_dead;
-			drv->safe_state_index = count;
-			break;
-
-		case ACPI_STATE_C2:
-			state->enter = acpi_idle_enter_simple;
+		if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) {
 			state->enter_dead = acpi_idle_play_dead;
 			drv->safe_state_index = count;
-			break;
-
-		case ACPI_STATE_C3:
-			state->enter = pr->flags.bm_check ?
-						acpi_idle_enter_bm :
-						acpi_idle_enter_simple;
-			break;
 		}
 
 		count++;
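
With one .enter callback for every state, the per-type switch in acpi_processor_setup_cpuidle_states() collapses to the single conditional above: only C1/C2 states are registered as play-dead targets and as the bus-master fallback, so drv->safe_state_index ends up pointing at the deepest C1/C2 entry. A stand-alone sketch of that wiring (stand-in structs; field names mirror the kernel's struct cpuidle_state and struct cpuidle_driver, latencies and the three-state table invented for the demo):

#include <stdio.h>

enum { ACPI_STATE_C1 = 1, ACPI_STATE_C2, ACPI_STATE_C3 };

struct state_sketch {
	unsigned int exit_latency;	/* us */
	unsigned int target_residency;	/* us */
	const char *enter;		/* callback, named for printing */
	const char *enter_dead;		/* NULL: not a CPU-offline target */
};

struct driver_sketch {
	struct state_sketch states[3];
	int safe_state_index;		/* -1: no bus-master fallback */
};

int main(void)
{
	const int type[] = { ACPI_STATE_C1, ACPI_STATE_C2, ACPI_STATE_C3 };
	const unsigned int latency[] = { 1, 20, 100 };
	struct driver_sketch drv = { .safe_state_index = -1 };

	for (int count = 0; count < 3; count++) {
		struct state_sketch *state = &drv.states[count];

		state->exit_latency = latency[count];
		state->target_residency = 2 * latency[count]; /* latency_factor */
		state->enter = "acpi_idle_enter";	/* same for all states */
		if (type[count] == ACPI_STATE_C1 || type[count] == ACPI_STATE_C2) {
			state->enter_dead = "acpi_idle_play_dead";
			drv.safe_state_index = count;	/* deepest C1/C2 wins */
		}
	}
	printf("safe_state_index = %d\n", drv.safe_state_index);	/* 1 */
	return 0;
}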
