author     Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2015-02-10 23:04:57 -0500
committer  Rafael J. Wysocki <rjw@rjwysocki.net>           2015-02-15 13:40:10 -0500
commit     5f5081852038d9a7b309190730bfb724b413235e (patch)
tree       03993aaa509ee38a7a41832ec9c29c80a7eaa711
parent     5fe2e52720e7a62da956d8aa81eadf6959c7acc8 (diff)
ACPI / idle: Implement ->enter_freeze callback routine
Add an ->enter_freeze callback routine, acpi_idle_enter_freeze(), to the
ACPI cpuidle driver and point ->enter_freeze to it for all of the C2-type
and C3-type states that don't need to fall back to C1.  C1 may be
halt-induced, and halt re-enables interrupts on exit from idle, which
->enter_freeze is not allowed to do.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-rw-r--r--   drivers/acpi/processor_idle.c   48
1 file changed, 40 insertions(+), 8 deletions(-)
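For readers less familiar with the cpuidle side of this change, the sketch below
(not part of the patch) shows how a driver's state table distinguishes ->enter
from ->enter_freeze.  The driver name, callback names, and the single made-up
state are hypothetical; the callback signatures follow the ones used by
acpi_idle_enter() and acpi_idle_enter_freeze() in the diff that follows.

/*
 * Illustrative sketch only -- not part of the patch.  "my_idle", my_enter()
 * and my_enter_freeze() are hypothetical names.
 */
#include <linux/cpuidle.h>
#include <linux/module.h>

/* Regular idle entry: may use a halt-style instruction that re-enables
 * interrupts when the CPU wakes up. */
static int my_enter(struct cpuidle_device *dev,
		    struct cpuidle_driver *drv, int index)
{
	/* ... enter the requested C-state ... */
	return index;
}

/* Suspend-to-idle entry: must leave interrupts disabled on exit, so it is
 * only installed for states that never fall back to halt-induced C1. */
static void my_enter_freeze(struct cpuidle_device *dev,
			    struct cpuidle_driver *drv, int index)
{
	/* ... enter the requested C-state without touching the IRQ state ... */
}

static struct cpuidle_driver my_idle_driver = {
	.name  = "my_idle",
	.owner = THIS_MODULE,
	.states = {
		{
			.name         = "C2",
			.enter        = my_enter,
			.enter_freeze = my_enter_freeze,
		},
	},
	.state_count = 1,
};

The key design point mirrored here is that ->enter_freeze returns void and must
not re-enable interrupts, which is why the patch below only installs it for
states that never fall back to halt-induced C1.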
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index c256bd7fbd78..c6bb9f1257c9 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -732,9 +732,8 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
 
 static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
 {
-	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && num_online_cpus() > 1 &&
-		!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED) &&
-		!pr->flags.has_cst;
+	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
+		!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
 }
 
 static int c3_cpu_count;
@@ -744,9 +743,10 @@ static DEFINE_RAW_SPINLOCK(c3_lock);
  * acpi_idle_enter_bm - enters C3 with proper BM handling
  * @pr: Target processor
  * @cx: Target state context
+ * @timer_bc: Whether or not to change timer mode to broadcast
  */
 static void acpi_idle_enter_bm(struct acpi_processor *pr,
-			       struct acpi_processor_cx *cx)
+			       struct acpi_processor_cx *cx, bool timer_bc)
 {
 	acpi_unlazy_tlb(smp_processor_id());
 
@@ -754,7 +754,8 @@ static void acpi_idle_enter_bm(struct acpi_processor *pr,
 	 * Must be done before busmaster disable as we might need to
 	 * access HPET !
 	 */
-	lapic_timer_state_broadcast(pr, cx, 1);
+	if (timer_bc)
+		lapic_timer_state_broadcast(pr, cx, 1);
 
 	/*
 	 * disable bus master
@@ -784,7 +785,8 @@ static void acpi_idle_enter_bm(struct acpi_processor *pr,
 		raw_spin_unlock(&c3_lock);
 	}
 
-	lapic_timer_state_broadcast(pr, cx, 0);
+	if (timer_bc)
+		lapic_timer_state_broadcast(pr, cx, 0);
 }
 
 static int acpi_idle_enter(struct cpuidle_device *dev,
@@ -798,12 +800,12 @@ static int acpi_idle_enter(struct cpuidle_device *dev,
 		return -EINVAL;
 
 	if (cx->type != ACPI_STATE_C1) {
-		if (acpi_idle_fallback_to_c1(pr)) {
+		if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
 			index = CPUIDLE_DRIVER_STATE_START;
 			cx = per_cpu(acpi_cstate[index], dev->cpu);
 		} else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
 			if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
-				acpi_idle_enter_bm(pr, cx);
+				acpi_idle_enter_bm(pr, cx, true);
 				return index;
 			} else if (drv->safe_state_index >= 0) {
 				index = drv->safe_state_index;
@@ -827,6 +829,27 @@ static int acpi_idle_enter(struct cpuidle_device *dev,
 	return index;
 }
 
+static void acpi_idle_enter_freeze(struct cpuidle_device *dev,
+				   struct cpuidle_driver *drv, int index)
+{
+	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
+
+	if (cx->type == ACPI_STATE_C3) {
+		struct acpi_processor *pr = __this_cpu_read(processors);
+
+		if (unlikely(!pr))
+			return;
+
+		if (pr->flags.bm_check) {
+			acpi_idle_enter_bm(pr, cx, false);
+			return;
+		} else {
+			ACPI_FLUSH_CPU_CACHE();
+		}
+	}
+	acpi_idle_do_entry(cx);
+}
+
 struct cpuidle_driver acpi_idle_driver = {
 	.name = "acpi_idle",
 	.owner = THIS_MODULE,
@@ -925,6 +948,15 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
 			state->enter_dead = acpi_idle_play_dead;
 			drv->safe_state_index = count;
 		}
+		/*
+		 * Halt-induced C1 is not good for ->enter_freeze, because it
+		 * re-enables interrupts on exit.  Moreover, C1 is generally not
+		 * particularly interesting from the suspend-to-idle angle, so
+		 * avoid C1 and the situations in which we may need to fall back
+		 * to it altogether.
+		 */
+		if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
+			state->enter_freeze = acpi_idle_enter_freeze;
 
 		count++;
 		if (count == CPUIDLE_STATE_MAX)