path: root/arch/x86/kernel/process.c
Diffstat (limited to 'arch/x86/kernel/process.c')
-rw-r--r--	arch/x86/kernel/process.c	190
1 file changed, 173 insertions(+), 17 deletions(-)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index ba370dc8685b..4061d63aabe7 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -6,6 +6,7 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 #include <linux/pm.h>
+#include <linux/clockchips.h>
 
 struct kmem_cache *task_xstate_cachep;
 
@@ -45,6 +46,76 @@ void arch_task_cache_init(void)
 				  SLAB_PANIC, NULL);
 }
 
+/*
+ * Idle related variables and functions
+ */
+unsigned long boot_option_idle_override = 0;
+EXPORT_SYMBOL(boot_option_idle_override);
+
+/*
+ * Power management idle function, if any.
+ */
+void (*pm_idle)(void);
+EXPORT_SYMBOL(pm_idle);
+
+#ifdef CONFIG_X86_32
+/*
+ * This halt magic was a workaround for ancient floppy DMA
+ * wreckage. It should be safe to remove.
+ */
+static int hlt_counter;
+void disable_hlt(void)
+{
+	hlt_counter++;
+}
+EXPORT_SYMBOL(disable_hlt);
+
+void enable_hlt(void)
+{
+	hlt_counter--;
+}
+EXPORT_SYMBOL(enable_hlt);
+
+static inline int hlt_use_halt(void)
+{
+	return (!hlt_counter && boot_cpu_data.hlt_works_ok);
+}
+#else
+static inline int hlt_use_halt(void)
+{
+	return 1;
+}
+#endif
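The disable_hlt()/enable_hlt() pair above is a reference count: any caller can temporarily veto HLT, and the idle loop busy-waits until the count drops back to zero. Historically the floppy driver used this because HLT on some ancient chipsets disturbed floppy DMA. A minimal sketch of the intended usage pattern; start_dma() and wait_dma_done() are hypothetical stand-ins, not real kernel APIs:

extern void start_dma(void);		/* hypothetical driver helper */
extern void wait_dma_done(void);	/* hypothetical driver helper */

static void dma_with_hlt_disabled(void)
{
	disable_hlt();		/* idle loop now spins in cpu_relax() */
	start_dma();
	wait_dma_done();
	enable_hlt();		/* count back to zero: HLT allowed again */
}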
+
+/*
+ * We use this if we don't have any better
+ * idle routine.
+ */
+void default_idle(void)
+{
+	if (hlt_use_halt()) {
+		current_thread_info()->status &= ~TS_POLLING;
+		/*
+		 * TS_POLLING-cleared state must be visible before we
+		 * test NEED_RESCHED:
+		 */
+		smp_mb();
+
+		if (!need_resched())
+			safe_halt();	/* enables interrupts racelessly */
+		else
+			local_irq_enable();
+		current_thread_info()->status |= TS_POLLING;
+	} else {
+		local_irq_enable();
+		/* loop is done by the caller */
+		cpu_relax();
+	}
+}
+#ifdef CONFIG_APM_MODULE
+EXPORT_SYMBOL(default_idle);
+#endif
+
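The smp_mb() in default_idle() pairs with the scheduler's wakeup path: a waker first sets TIF_NEED_RESCHED, then looks at TS_POLLING to decide whether a reschedule IPI is needed at all. If the idle CPU's store clearing TS_POLLING could be reordered past its need_resched() load, both sides could miss each other and the CPU would halt with a wakeup pending. A simplified sketch of the two sides of the protocol, assuming the waker logic of the scheduler's resched_task() boiled down to its essentials:

/* idle side, as in default_idle() above */
static void idle_enter(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	smp_mb();		/* clear-store before need_resched() load */
	if (!need_resched())
		safe_halt();	/* a late wakeup now arrives as an interrupt */
	current_thread_info()->status |= TS_POLLING;
}

/* waker side (simplified from resched_task()) */
static void wake_idle_cpu(struct task_struct *p)
{
	set_tsk_need_resched(p);
	smp_mb();		/* flag-store before TS_POLLING load */
	if (!(task_thread_info(p)->status & TS_POLLING))
		smp_send_reschedule(task_cpu(p));
}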
 static void do_nothing(void *unused)
 {
 }
@@ -122,44 +193,129 @@ static void poll_idle(void)
  *
  * idle=mwait overrides this decision and forces the usage of mwait.
  */
+
+#define MWAIT_INFO			0x05
+#define MWAIT_ECX_EXTENDED_INFO		0x01
+#define MWAIT_EDX_C1			0xf0
+
 static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
 {
+	u32 eax, ebx, ecx, edx;
+
 	if (force_mwait)
 		return 1;
 
-	if (c->x86_vendor == X86_VENDOR_AMD) {
-		switch (c->x86) {
-		case 0x10:
-		case 0x11:
-			return 0;
-		}
-	}
+	if (c->cpuid_level < MWAIT_INFO)
+		return 0;
+
+	cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
+	/* Check whether ECX signals extended MWAIT info (EDX valid) */
+	if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
+		return 1;
+
+	/*
+	 * EDX enumerates the MONITOR/MWAIT extensions. Check whether
+	 * C1 supports MWAIT.
+	 */
+	return (edx & MWAIT_EDX_C1);
+}
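The same enumeration can be inspected from user space: CPUID leaf 5 describes MONITOR/MWAIT, ECX bit 0 says whether the sub-state counts in EDX are valid, and EDX[7:4] is the number of C1 sub-states (the MWAIT_EDX_C1 mask above). A small stand-alone sketch, assuming an x86 host and GCC's <cpuid.h>:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x05, &eax, &ebx, &ecx, &edx)) {
		printf("CPUID leaf 5 not available\n");
		return 0;
	}
	if (!(ecx & 0x01)) {
		/* no extended enumeration: mwait_usable() assumes usable */
		printf("no extended MWAIT info\n");
		return 0;
	}
	printf("C1 MWAIT sub-states: %u\n", (edx >> 4) & 0xf);
	return 0;
}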
+
+/*
+ * Check for AMD CPUs, which potentially have C1E support
+ */
+static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
+{
+	if (c->x86_vendor != X86_VENDOR_AMD)
+		return 0;
+
+	if (c->x86 < 0x0F)
+		return 0;
+
+	/* Family 0x0f models < rev F do not have C1E */
+	if (c->x86 == 0x0f && c->x86_model < 0x40)
+		return 0;
+
 	return 1;
 }
 
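For reference, the family/model cutoff in check_c1e_idle() isolated as a pure predicate; amd_may_have_c1e() is an illustrative name, and the constants mirror the checks above (family at least 0x0f, excluding pre-rev-F family 0x0f parts, i.e. models below 0x40):

/* illustrative stand-alone version of the gate above */
static int amd_may_have_c1e(unsigned int family, unsigned int model)
{
	if (family < 0x0f)
		return 0;	/* K7 and older: no C1E */
	if (family == 0x0f && model < 0x40)
		return 0;	/* family 0x0f before rev F: no C1E */
	return 1;
}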
-void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
+/*
+ * C1E aware idle routine. We check for C1E active in the interrupt
+ * pending message MSR. If we detect C1E, then we handle it the same
+ * way as C3 power states (local apic timer and TSC stop).
+ */
+static void c1e_idle(void)
 {
-	static int selected;
+	static cpumask_t c1e_mask = CPU_MASK_NONE;
+	static int c1e_detected;
 
-	if (selected)
+	if (need_resched())
 		return;
+
+	if (!c1e_detected) {
+		u32 lo, hi;
+
+		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
+		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
+			c1e_detected = 1;
+			mark_tsc_unstable("TSC halt in C1E");
+			printk(KERN_INFO "System has C1E enabled\n");
+		}
+	}
+
+	if (c1e_detected) {
+		int cpu = smp_processor_id();
+
+		if (!cpu_isset(cpu, c1e_mask)) {
+			cpu_set(cpu, c1e_mask);
+			/*
+			 * Force broadcast so ACPI cannot interfere. Needs
+			 * to run with interrupts enabled as it uses
+			 * smp_call_function.
+			 */
+			local_irq_enable();
+			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
+					   &cpu);
+			printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
+			       cpu);
+			local_irq_disable();
+		}
+		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
+
+		default_idle();
+
+		/*
+		 * The switch back from broadcast mode needs to be
+		 * called with interrupts disabled.
+		 */
+		local_irq_disable();
+		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
+		local_irq_enable();
+	} else
+		default_idle();
+}
+
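Whether the BIOS has actually enabled C1E can also be checked from user space by reading the same MSR that c1e_idle() polls, via the msr driver (modprobe msr, run as root). A sketch, assuming MSR_K8_INT_PENDING_MSG (0xc0010055) and the K8_INTP_C1E_ACTIVE_MASK value (0x18000000) from msr-index.h:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}
	/* the msr driver maps the file offset to the MSR number */
	if (pread(fd, &val, sizeof(val), 0xc0010055) != sizeof(val)) {
		perror("rdmsr");
		return 1;
	}
	printf("C1E %sactive\n", (val & 0x18000000) ? "" : "not ");
	close(fd);
	return 0;
}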
+void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
+{
 #ifdef CONFIG_X86_SMP
 	if (pm_idle == poll_idle && smp_num_siblings > 1) {
 		printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
 			" performance may degrade.\n");
 	}
 #endif
+	if (pm_idle)
+		return;
+
 	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
 		/*
-		 * Skip, if setup has overridden idle.
 		 * One CPU supports mwait => all CPUs support mwait
 		 */
-		if (!pm_idle) {
-			printk(KERN_INFO "using mwait in idle threads.\n");
-			pm_idle = mwait_idle;
-		}
-	}
-	selected = 1;
+		printk(KERN_INFO "using mwait in idle threads.\n");
+		pm_idle = mwait_idle;
+	} else if (check_c1e_idle(c)) {
+		printk(KERN_INFO "using C1E aware idle routine\n");
+		pm_idle = c1e_idle;
+	} else
+		pm_idle = default_idle;
 }
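After this change the selection is a strict priority order: a boot-time override that already installed pm_idle (e.g. idle=poll; idle=mwait instead forces the MWAIT branch via force_mwait), then MWAIT where usable, then the C1E aware routine on affected AMD parts, then plain default_idle(). Restated as a sketch that returns the chosen function pointer; pick_idle_routine is an illustrative name, not a kernel symbol:

/* sketch: the decision order of select_idle_routine() */
static void (*pick_idle_routine(const struct cpuinfo_x86 *c))(void)
{
	if (pm_idle)		/* already set by a boot option */
		return pm_idle;
	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c))
		return mwait_idle;
	if (check_c1e_idle(c))
		return c1e_idle;
	return default_idle;
}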
 
 static int __init idle_setup(char *str)