author	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2015-02-03 15:55:11 -0500
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2015-02-03 15:55:11 -0500
commit	6491bc0c616969f7ae1fcb30a8823c333e2944c7 (patch)
tree	8f055537c917f4caa8ec9ccc6b764e4224b21418 /drivers
parent	d2cecb3d66e39765f3dec8adfb88c322e709b06d (diff)
ACPI / cpuidle: Common callback routine for entering states
Introduce a common ->enter callback routine for the ACPI cpuidle driver, acpi_idle_enter(), which helps to reduce code complexity, size and duplication, and prevents theoretically possible failures in which an incorrect routine may be run to enter a given idle state as a result of a firmware bug (e.g. when _CST returns a different set of states for each processor).

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
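To illustrate the pattern the commit adopts, here is a minimal, self-contained C sketch; the types and names below are illustrative stand-ins, not the kernel's cpuidle API. A single common entry callback branches on the state's type at run time, so registration no longer has to pick between per-state routines:

    #include <stdio.h>

    /* Illustrative stand-ins for the kernel's C-state bookkeeping. */
    enum cstate_type { STATE_C1, STATE_C2, STATE_C3 };

    struct idle_state {
            enum cstate_type type;
            int bm_check;   /* bus-master activity must be handled */
    };

    /*
     * One common entry point: the per-state branching that used to live
     * in separate callbacks (plain vs. BM-handling) moves inside it.
     */
    static int common_enter(struct idle_state *st, int index)
    {
            if (st->type == STATE_C3 && st->bm_check) {
                    printf("state %d: C3 entry with bus-master handling\n", index);
                    return index;
            }
            printf("state %d: plain entry\n", index);
            return index;
    }

    int main(void)
    {
            struct idle_state states[] = {
                    { STATE_C1, 0 }, { STATE_C2, 0 }, { STATE_C3, 1 },
            };

            /*
             * Every state gets the same callback, so a firmware quirk that
             * makes index and state disagree can no longer select the
             * wrong entry routine.
             */
            for (int i = 0; i < 3; i++)
                    common_enter(&states[i], i);
            return 0;
    }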
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/acpi/processor_idle.c	118
1 file changed, 45 insertions(+), 73 deletions(-)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 58113a6fa1d3..c256bd7fbd78 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -737,74 +737,17 @@ static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
 		!pr->flags.has_cst;
 }
 
-/**
- * acpi_idle_enter_simple - enters a CPU idle state without BM handling
- * @dev: the target CPU
- * @drv: cpuidle driver with cpuidle state information
- * @index: the index of suggested state
- */
-static int acpi_idle_enter_simple(struct cpuidle_device *dev,
-		struct cpuidle_driver *drv, int index)
-{
-	struct acpi_processor *pr;
-	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
-
-	pr = __this_cpu_read(processors);
-
-	if (unlikely(!pr))
-		return -EINVAL;
-
-	if (cx->type != ACPI_STATE_C1 && acpi_idle_fallback_to_c1(pr)) {
-		index = CPUIDLE_DRIVER_STATE_START;
-		cx = per_cpu(acpi_cstate[index], dev->cpu);
-	}
-
-	lapic_timer_state_broadcast(pr, cx, 1);
-
-	if (cx->type == ACPI_STATE_C3)
-		ACPI_FLUSH_CPU_CACHE();
-
-	acpi_idle_do_entry(cx);
-
-	lapic_timer_state_broadcast(pr, cx, 0);
-	return index;
-}
-
 static int c3_cpu_count;
 static DEFINE_RAW_SPINLOCK(c3_lock);
 
 /**
  * acpi_idle_enter_bm - enters C3 with proper BM handling
- * @dev: the target CPU
- * @drv: cpuidle driver containing state data
- * @index: the index of suggested state
- *
- * If BM is detected, the deepest non-C3 idle state is entered instead.
+ * @pr: Target processor
+ * @cx: Target state context
  */
-static int acpi_idle_enter_bm(struct cpuidle_device *dev,
-		struct cpuidle_driver *drv, int index)
+static void acpi_idle_enter_bm(struct acpi_processor *pr,
+			       struct acpi_processor_cx *cx)
 {
-	struct acpi_processor *pr;
-	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
-
-	pr = __this_cpu_read(processors);
-
-	if (unlikely(!pr))
-		return -EINVAL;
-
-	if (acpi_idle_fallback_to_c1(pr))
-		return acpi_idle_enter_simple(dev, drv, CPUIDLE_DRIVER_STATE_START);
-
-	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
-		if (drv->safe_state_index >= 0) {
-			return drv->states[drv->safe_state_index].enter(dev,
-						drv, drv->safe_state_index);
-		} else {
-			acpi_safe_halt();
-			return -EBUSY;
-		}
-	}
-
 	acpi_unlazy_tlb(smp_processor_id());
 
 	/*
@@ -842,6 +785,45 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	}
 
 	lapic_timer_state_broadcast(pr, cx, 0);
+}
+
+static int acpi_idle_enter(struct cpuidle_device *dev,
+			   struct cpuidle_driver *drv, int index)
+{
+	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
+	struct acpi_processor *pr;
+
+	pr = __this_cpu_read(processors);
+	if (unlikely(!pr))
+		return -EINVAL;
+
+	if (cx->type != ACPI_STATE_C1) {
+		if (acpi_idle_fallback_to_c1(pr)) {
+			index = CPUIDLE_DRIVER_STATE_START;
+			cx = per_cpu(acpi_cstate[index], dev->cpu);
+		} else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
+			if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
+				acpi_idle_enter_bm(pr, cx);
+				return index;
+			} else if (drv->safe_state_index >= 0) {
+				index = drv->safe_state_index;
+				cx = per_cpu(acpi_cstate[index], dev->cpu);
+			} else {
+				acpi_safe_halt();
+				return -EBUSY;
+			}
+		}
+	}
+
+	lapic_timer_state_broadcast(pr, cx, 1);
+
+	if (cx->type == ACPI_STATE_C3)
+		ACPI_FLUSH_CPU_CACHE();
+
+	acpi_idle_do_entry(cx);
+
+	lapic_timer_state_broadcast(pr, cx, 0);
+
 	return index;
 }
 
@@ -936,22 +918,12 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
 		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
 		state->exit_latency = cx->latency;
 		state->target_residency = cx->latency * latency_factor;
+		state->enter = acpi_idle_enter;
 
 		state->flags = 0;
-		switch (cx->type) {
-
-		case ACPI_STATE_C1:
-		case ACPI_STATE_C2:
-			state->enter = acpi_idle_enter_simple;
+		if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) {
 			state->enter_dead = acpi_idle_play_dead;
 			drv->safe_state_index = count;
-			break;
-
-		case ACPI_STATE_C3:
-			state->enter = pr->flags.bm_check ?
-					acpi_idle_enter_bm :
-					acpi_idle_enter_simple;
-			break;
 		}
 
 		count++;
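The registration side of the change follows the same shape: the callback assignment becomes unconditional, and only the per-type extras (enter_dead, safe_state_index) remain conditional. A minimal sketch of that loop, again with illustrative stand-in types rather than the kernel's cpuidle structures:

    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative stand-ins; not the kernel's cpuidle types. */
    enum cstate_type { STATE_C1, STATE_C2, STATE_C3 };

    struct state_desc {
            enum cstate_type type;
            int (*enter)(int index);  /* common callback for every state */
            int enter_dead_set;       /* models the per-type extras */
    };

    static int common_enter(int index)
    {
            return index;  /* all states share this one entry point */
    }

    int main(void)
    {
            struct state_desc s[] = {
                    { STATE_C1, NULL, 0 },
                    { STATE_C2, NULL, 0 },
                    { STATE_C3, NULL, 0 },
            };

            for (size_t i = 0; i < sizeof(s) / sizeof(s[0]); i++) {
                    /* One unconditional assignment replaces the old switch. */
                    s[i].enter = common_enter;
                    if (s[i].type == STATE_C1 || s[i].type == STATE_C2)
                            s[i].enter_dead_set = 1;
                    printf("state %zu -> common callback, enter_dead=%d\n",
                           i, s[i].enter_dead_set);
            }
            return 0;
    }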