author	Peter Zijlstra <peterz@infradead.org>	2013-12-12 09:08:36 -0500
committer	H. Peter Anvin <hpa@linux.intel.com>	2013-12-19 14:54:44 -0500
commit	16824255394f55adf31b9a96a9965d8c15bdac4c (patch)
tree	b28f4a2d59db25700c8b4eafb2e3cfc188c121bc
parent	40e2d7f9b5dae048789c64672bf3027fbb663ffa (diff)
x86, acpi, idle: Restructure the mwait idle routines
People seem to delight in writing wrong and broken mwait idle routines;
collapse the lot.

This leaves mwait_play_dead() the sole remaining user of __mwait() and
new __mwait() users are probably doing it wrong.

Also remove __sti_mwait() as it's unused.

Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Jacob Jun Pan <jacob.jun.pan@linux.intel.com>
Cc: Mike Galbraith <bitbucket@online.de>
Cc: Len Brown <lenb@kernel.org>
Cc: Rui Zhang <rui.zhang@intel.com>
Acked-by: Rafael Wysocki <rafael.j.wysocki@intel.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20131212141654.616820819@infradead.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
-rw-r--r--	arch/x86/include/asm/mwait.h		40
-rw-r--r--	arch/x86/include/asm/processor.h	23
-rw-r--r--	arch/x86/kernel/acpi/cstate.c		23
-rw-r--r--	drivers/acpi/acpi_pad.c			 5
-rw-r--r--	drivers/acpi/processor_idle.c		15
-rw-r--r--	drivers/idle/intel_idle.c		11
-rw-r--r--	drivers/thermal/intel_powerclamp.c	 4
7 files changed, 43 insertions(+), 78 deletions(-)
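
As context for the diff below: after this patch an idle driver's MWAIT entry collapses to a single call, with the polling-flag and need_resched() handling done inside mwait_idle_with_hints(). A minimal sketch of the resulting caller pattern follows; the function name my_idle_enter() and its hint parameter are illustrative, not from the tree.

#include <asm/mwait.h>	/* mwait_idle_with_hints(), MWAIT_ECX_INTERRUPT_BREAK */

/* Hypothetical caller; only the helper call reflects this patch. */
static void my_idle_enter(unsigned long mwait_hint)
{
	/*
	 * mwait_idle_with_hints() sets the polling flag, arms MONITOR on
	 * current_thread_info()->flags, re-checks need_resched() and only
	 * then executes MWAIT, so callers no longer open-code
	 * __monitor()/__mwait() pairs.
	 */
	mwait_idle_with_hints(mwait_hint, MWAIT_ECX_INTERRUPT_BREAK);
}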
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index 2f366d0ac6b4..361b02ef128c 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_MWAIT_H
 #define _ASM_X86_MWAIT_H
 
+#include <linux/sched.h>
+
 #define MWAIT_SUBSTATE_MASK	0xf
 #define MWAIT_CSTATE_MASK	0xf
 #define MWAIT_SUBSTATE_SIZE	4
@@ -13,4 +15,42 @@
 
 #define MWAIT_ECX_INTERRUPT_BREAK	0x1
 
+static inline void __monitor(const void *eax, unsigned long ecx,
+			     unsigned long edx)
+{
+	/* "monitor %eax, %ecx, %edx;" */
+	asm volatile(".byte 0x0f, 0x01, 0xc8;"
+		     :: "a" (eax), "c" (ecx), "d"(edx));
+}
+
+static inline void __mwait(unsigned long eax, unsigned long ecx)
+{
+	/* "mwait %eax, %ecx;" */
+	asm volatile(".byte 0x0f, 0x01, 0xc9;"
+		     :: "a" (eax), "c" (ecx));
+}
+
+/*
+ * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
+ * which can obviate IPI to trigger checking of need_resched.
+ * We execute MONITOR against need_resched and enter optimized wait state
+ * through MWAIT. Whenever someone changes need_resched, we would be woken
+ * up from MWAIT (without an IPI).
+ *
+ * New with Core Duo processors, MWAIT can take some hints based on CPU
+ * capability.
+ */
+static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
+{
+	if (!current_set_polling_and_test()) {
+		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
+			clflush((void *)&current_thread_info()->flags);
+
+		__monitor((void *)&current_thread_info()->flags, 0, 0);
+		if (!need_resched())
+			__mwait(eax, ecx);
+	}
+	__current_clr_polling();
+}
+
 #endif /* _ASM_X86_MWAIT_H */
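
The masks kept at the top of this header describe how the EAX hint passed to mwait_idle_with_hints() is packed: target C-state in bits 7:4, sub-state in bits 3:0, while MWAIT_ECX_INTERRUPT_BREAK in ECX makes interrupts break events even when they are masked. A sketch of that packing, using only the constants from the hunk above; the helper name make_mwait_hint() is hypothetical:

static inline unsigned long make_mwait_hint(unsigned int cstate,
					    unsigned int substate)
{
	/* example: cstate=2, substate=0 packs to the familiar 0x20 hint */
	return ((cstate & MWAIT_CSTATE_MASK) << MWAIT_SUBSTATE_SIZE) |
	       (substate & MWAIT_SUBSTATE_MASK);
}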
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 7b034a4057f9..24821f5768bc 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -700,29 +700,6 @@ static inline void sync_core(void)
 #endif
 }
 
-static inline void __monitor(const void *eax, unsigned long ecx,
-			     unsigned long edx)
-{
-	/* "monitor %eax, %ecx, %edx;" */
-	asm volatile(".byte 0x0f, 0x01, 0xc8;"
-		     :: "a" (eax), "c" (ecx), "d"(edx));
-}
-
-static inline void __mwait(unsigned long eax, unsigned long ecx)
-{
-	/* "mwait %eax, %ecx;" */
-	asm volatile(".byte 0x0f, 0x01, 0xc9;"
-		     :: "a" (eax), "c" (ecx));
-}
-
-static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
-{
-	trace_hardirqs_on();
-	/* "mwait %eax, %ecx;" */
-	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
-		     :: "a" (eax), "c" (ecx));
-}
-
 extern void select_idle_routine(const struct cpuinfo_x86 *c);
 extern void init_amd_e400_c1e_mask(void);
 
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index d2b7f27781bc..e69182fd01cf 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -150,29 +150,6 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 }
 EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
 
-/*
- * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
- * which can obviate IPI to trigger checking of need_resched.
- * We execute MONITOR against need_resched and enter optimized wait state
- * through MWAIT. Whenever someone changes need_resched, we would be woken
- * up from MWAIT (without an IPI).
- *
- * New with Core Duo processors, MWAIT can take some hints based on CPU
- * capability.
- */
-void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
-{
-	if (!need_resched()) {
-		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
-			clflush((void *)&current_thread_info()->flags);
-
-		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		smp_mb();
-		if (!need_resched())
-			__mwait(ax, cx);
-	}
-}
-
 void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
 {
 	unsigned int cpu = smp_processor_id();
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index fc6008fbce35..509452a62f96 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -193,10 +193,7 @@ static int power_saving_thread(void *data)
 			CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 		stop_critical_timings();
 
-		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		smp_mb();
-		if (!need_resched())
-			__mwait(power_saving_mwait_eax, 1);
+		mwait_idle_with_hints(power_saving_mwait_eax, 1);
 
 		start_critical_timings();
 		if (lapic_marked_unstable)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 644516d9bde6..f90c56c8379e 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -727,11 +727,6 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 	if (unlikely(!pr))
 		return -EINVAL;
 
-	if (cx->entry_method == ACPI_CSTATE_FFH) {
-		if (current_set_polling_and_test())
-			return -EINVAL;
-	}
-
 	lapic_timer_state_broadcast(pr, cx, 1);
 	acpi_idle_do_entry(cx);
 
@@ -785,11 +780,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 	if (unlikely(!pr))
 		return -EINVAL;
 
-	if (cx->entry_method == ACPI_CSTATE_FFH) {
-		if (current_set_polling_and_test())
-			return -EINVAL;
-	}
-
 	/*
 	 * Must be done before busmaster disable as we might need to
 	 * access HPET !
@@ -841,11 +831,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 		}
 	}
 
-	if (cx->entry_method == ACPI_CSTATE_FFH) {
-		if (current_set_polling_and_test())
-			return -EINVAL;
-	}
-
 	acpi_unlazy_tlb(smp_processor_id());
 
 	/* Tell the scheduler that we are going deep-idle: */
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index f80b700f821c..efec4055fd5e 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -375,16 +375,7 @@ static int intel_idle(struct cpuidle_device *dev,
 	if (!(lapic_timer_reliable_states & (1 << (cstate))))
 		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 
-	if (!current_set_polling_and_test()) {
-
-		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
-			clflush((void *)&current_thread_info()->flags);
-
-		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		smp_mb();
-		if (!need_resched())
-			__mwait(eax, ecx);
-	}
+	mwait_idle_with_hints(eax, ecx);
 
 	if (!(lapic_timer_reliable_states & (1 << (cstate))))
 		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 8f181b3f842b..e8275f2df9af 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -438,9 +438,7 @@ static int clamp_thread(void *arg)
 		 */
 		local_touch_nmi();
 		stop_critical_timings();
-		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		cpu_relax(); /* allow HT sibling to run */
-		__mwait(eax, ecx);
+		mwait_idle_with_hints(eax, ecx);
 		start_critical_timings();
 		atomic_inc(&idle_wakeup_counter);
 	}