aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/acpi
diff options
context:
space:
mode:
authorLen Brown <len.brown@intel.com>2010-05-24 14:27:44 -0400
committerLen Brown <len.brown@intel.com>2010-05-28 13:58:57 -0400
commit02cf4f9808382af7265cafc33dc86ec5875526aa (patch)
tree6e4b7fc98a125e6af6011ee04e5fbf2d2fe38c4f /drivers/acpi
parenta7d27c37533524e23bbdc9ba1d78e3241b7483ea (diff)
ACPI: acpi_idle: touch TS_POLLING only in the non-MWAIT case
commit d306ebc28649b89877a22158fe0076f06cc46f60 (ACPI: Be in TS_POLLING state during mwait based C-state entry) fixed an important power & performance issue where ACPI c2 and c3 C-states were clearing TS_POLLING even when using MWAIT (ACPI_STATE_FFH). That bug had been causing us to receive redundant scheduling interrupts when we had already been woken up by MONITOR/MWAIT. Following up on that... In the MWAIT case, we don't have to subsequently check need_resched(), as that check was there for the TS_POLLING-clearing case. Note that not only does the cpuidle calling function already check need_resched() before calling us, the low-level entry into monitor/mwait calls it twice -- guaranteeing that a write to the trigger address can not go un-noticed. Also, in this case, we don't have to set TS_POLLING when we wake, because we never cleared it. Signed-off-by: Len Brown <len.brown@intel.com> Acked-by: Venkatesh Pallipadi <venki@google.com>
Diffstat (limited to 'drivers/acpi')
-rw-r--r--drivers/acpi/processor_idle.c28
1 files changed, 16 insertions, 12 deletions
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 5939e7f7d8e9..a4166e2abb92 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -881,6 +881,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
881 return(acpi_idle_enter_c1(dev, state)); 881 return(acpi_idle_enter_c1(dev, state));
882 882
883 local_irq_disable(); 883 local_irq_disable();
884
884 if (cx->entry_method != ACPI_CSTATE_FFH) { 885 if (cx->entry_method != ACPI_CSTATE_FFH) {
885 current_thread_info()->status &= ~TS_POLLING; 886 current_thread_info()->status &= ~TS_POLLING;
886 /* 887 /*
@@ -888,12 +889,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
888 * NEED_RESCHED: 889 * NEED_RESCHED:
889 */ 890 */
890 smp_mb(); 891 smp_mb();
891 }
892 892
893 if (unlikely(need_resched())) { 893 if (unlikely(need_resched())) {
894 current_thread_info()->status |= TS_POLLING; 894 current_thread_info()->status |= TS_POLLING;
895 local_irq_enable(); 895 local_irq_enable();
896 return 0; 896 return 0;
897 }
897 } 898 }
898 899
899 /* 900 /*
@@ -918,7 +919,8 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
918 sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); 919 sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
919 920
920 local_irq_enable(); 921 local_irq_enable();
921 current_thread_info()->status |= TS_POLLING; 922 if (cx->entry_method != ACPI_CSTATE_FFH)
923 current_thread_info()->status |= TS_POLLING;
922 924
923 cx->usage++; 925 cx->usage++;
924 926
@@ -968,6 +970,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
968 } 970 }
969 971
970 local_irq_disable(); 972 local_irq_disable();
973
971 if (cx->entry_method != ACPI_CSTATE_FFH) { 974 if (cx->entry_method != ACPI_CSTATE_FFH) {
972 current_thread_info()->status &= ~TS_POLLING; 975 current_thread_info()->status &= ~TS_POLLING;
973 /* 976 /*
@@ -975,12 +978,12 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
975 * NEED_RESCHED: 978 * NEED_RESCHED:
976 */ 979 */
977 smp_mb(); 980 smp_mb();
978 }
979 981
980 if (unlikely(need_resched())) { 982 if (unlikely(need_resched())) {
981 current_thread_info()->status |= TS_POLLING; 983 current_thread_info()->status |= TS_POLLING;
982 local_irq_enable(); 984 local_irq_enable();
983 return 0; 985 return 0;
986 }
984 } 987 }
985 988
986 acpi_unlazy_tlb(smp_processor_id()); 989 acpi_unlazy_tlb(smp_processor_id());
@@ -1032,7 +1035,8 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
1032 sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); 1035 sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
1033 1036
1034 local_irq_enable(); 1037 local_irq_enable();
1035 current_thread_info()->status |= TS_POLLING; 1038 if (cx->entry_method != ACPI_CSTATE_FFH)
1039 current_thread_info()->status |= TS_POLLING;
1036 1040
1037 cx->usage++; 1041 cx->usage++;
1038 1042