path: root/arch/x86/kernel/process.c
Diffstat (limited to 'arch/x86/kernel/process.c')
-rw-r--r--  arch/x86/kernel/process.c  220
1 file changed, 201 insertions, 19 deletions
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index ba370dc8685b..4d629c62f4f8 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -6,6 +6,13 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 #include <linux/pm.h>
+#include <linux/clockchips.h>
+#include <asm/system.h>
+
+unsigned long idle_halt;
+EXPORT_SYMBOL(idle_halt);
+unsigned long idle_nomwait;
+EXPORT_SYMBOL(idle_nomwait);
 
 struct kmem_cache *task_xstate_cachep;
 
@@ -45,6 +52,76 @@ void arch_task_cache_init(void)
 				  SLAB_PANIC, NULL);
 }
 
+/*
+ * Idle related variables and functions
+ */
+unsigned long boot_option_idle_override = 0;
+EXPORT_SYMBOL(boot_option_idle_override);
+
+/*
+ * Powermanagement idle function, if any..
+ */
+void (*pm_idle)(void);
+EXPORT_SYMBOL(pm_idle);
+
+#ifdef CONFIG_X86_32
+/*
+ * This halt magic was a workaround for ancient floppy DMA
+ * wreckage. It should be safe to remove.
+ */
+static int hlt_counter;
+void disable_hlt(void)
+{
+	hlt_counter++;
+}
+EXPORT_SYMBOL(disable_hlt);
+
+void enable_hlt(void)
+{
+	hlt_counter--;
+}
+EXPORT_SYMBOL(enable_hlt);
+
+static inline int hlt_use_halt(void)
+{
+	return (!hlt_counter && boot_cpu_data.hlt_works_ok);
+}
+#else
+static inline int hlt_use_halt(void)
+{
+	return 1;
+}
+#endif
+
+/*
+ * We use this if we don't have any better
+ * idle routine..
+ */
+void default_idle(void)
+{
+	if (hlt_use_halt()) {
+		current_thread_info()->status &= ~TS_POLLING;
+		/*
+		 * TS_POLLING-cleared state must be visible before we
+		 * test NEED_RESCHED:
+		 */
+		smp_mb();
+
+		if (!need_resched())
+			safe_halt();	/* enables interrupts racelessly */
+		else
+			local_irq_enable();
+		current_thread_info()->status |= TS_POLLING;
+	} else {
+		local_irq_enable();
+		/* loop is done by the caller */
+		cpu_relax();
+	}
+}
+#ifdef CONFIG_APM_MODULE
+EXPORT_SYMBOL(default_idle);
+#endif
+
 static void do_nothing(void *unused)
 {
 }
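A note on the TS_POLLING handling in default_idle() above: a remote CPU that wants to wake this one looks at the polling flag to decide whether setting NEED_RESCHED is enough or whether an IPI is required, so the idle path must clear the flag before it tests need_resched(), with a full barrier (the smp_mb()) in between; otherwise a wakeup could slip into that window and the CPU would halt with work pending. A minimal user-space sketch of that ordering, using C11 atomics in place of the kernel's thread_info flags, smp_mb() and safe_halt() (names and scenario are illustrative only, not kernel API):

/* Illustrative sketch: the clear-flag / barrier / test ordering from
 * default_idle(), modelled with C11 atomics. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int polling = 1;       /* stands in for TS_POLLING */
static atomic_int need_resched_flag; /* stands in for TIF_NEED_RESCHED */

static void idle_once(void)
{
	/* Clear "polling" BEFORE testing the wake-up flag... */
	atomic_store(&polling, 0);
	/* ...and make that store visible first (the smp_mb() in the patch). */
	atomic_thread_fence(memory_order_seq_cst);

	if (!atomic_load(&need_resched_flag))
		puts("no work pending: this is where safe_halt() would run");
	else
		puts("work pending: skip the halt");

	atomic_store(&polling, 1);
}

static void wake_up_cpu(void)
{
	atomic_store(&need_resched_flag, 1);
	atomic_thread_fence(memory_order_seq_cst);

	/* A polling CPU notices the flag by itself; a halted one needs
	 * the equivalent of a reschedule IPI. */
	if (!atomic_load(&polling))
		puts("target is not polling: would send an IPI");
}

int main(void)
{
	wake_up_cpu();
	idle_once();
	return 0;
}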
@@ -61,7 +138,7 @@ void cpu_idle_wait(void)
 {
 	smp_mb();
 	/* kick all the CPUs so that they exit out of pm_idle */
-	smp_call_function(do_nothing, NULL, 0, 1);
+	smp_call_function(do_nothing, NULL, 1);
 }
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
 
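For context on the one-line change above: the call drops its third argument because, around this kernel release, smp_call_function() lost its unused retry/nonatomic parameter and settled on a three-argument form. The prototype below is my recollection of the contemporaneous include/linux/smp.h and is worth verifying against the tree this diff applies to:

/* Assumed prototype after the retry argument was removed. */
int smp_call_function(void (*func)(void *info), void *info, int wait);

With wait=1, cpu_idle_wait() still blocks until do_nothing() has executed on the other CPUs, which is all it needs to force them out of the old pm_idle handler.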
@@ -122,44 +199,129 @@ static void poll_idle(void)
  *
  * idle=mwait overrides this decision and forces the usage of mwait.
  */
+
+#define MWAIT_INFO			0x05
+#define MWAIT_ECX_EXTENDED_INFO		0x01
+#define MWAIT_EDX_C1			0xf0
+
 static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
 {
+	u32 eax, ebx, ecx, edx;
+
 	if (force_mwait)
 		return 1;
 
-	if (c->x86_vendor == X86_VENDOR_AMD) {
-		switch(c->x86) {
-		case 0x10:
-		case 0x11:
-			return 0;
-		}
-	}
+	if (c->cpuid_level < MWAIT_INFO)
+		return 0;
+
+	cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
+	/* Check whether EDX has extended info about MWAIT */
+	if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
+		return 1;
+
+	/*
+	 * edx enumerates MONITOR/MWAIT extensions. Check whether
+	 * C1 supports MWAIT
+	 */
+	return (edx & MWAIT_EDX_C1);
+}
+
+/*
+ * Check for AMD CPUs, which have potentially C1E support
+ */
+static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
+{
+	if (c->x86_vendor != X86_VENDOR_AMD)
+		return 0;
+
+	if (c->x86 < 0x0F)
+		return 0;
+
+	/* Family 0x0f models < rev F do not have C1E */
+	if (c->x86 == 0x0f && c->x86_model < 0x40)
+		return 0;
+
 	return 1;
 }
 
-void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
+/*
+ * C1E aware idle routine. We check for C1E active in the interrupt
+ * pending message MSR. If we detect C1E, then we handle it the same
+ * way as C3 power states (local apic timer and TSC stop)
+ */
+static void c1e_idle(void)
 {
-	static int selected;
+	static cpumask_t c1e_mask = CPU_MASK_NONE;
+	static int c1e_detected;
 
-	if (selected)
+	if (need_resched())
 		return;
+
+	if (!c1e_detected) {
+		u32 lo, hi;
+
+		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
+		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
+			c1e_detected = 1;
+			mark_tsc_unstable("TSC halt in C1E");
+			printk(KERN_INFO "System has C1E enabled\n");
+		}
+	}
+
+	if (c1e_detected) {
+		int cpu = smp_processor_id();
+
+		if (!cpu_isset(cpu, c1e_mask)) {
+			cpu_set(cpu, c1e_mask);
+			/*
+			 * Force broadcast so ACPI can not interfere. Needs
+			 * to run with interrupts enabled as it uses
+			 * smp_call_function.
+			 */
+			local_irq_enable();
+			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
+					   &cpu);
+			printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
+			       cpu);
+			local_irq_disable();
+		}
+		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
+
+		default_idle();
+
+		/*
+		 * The switch back from broadcast mode needs to be
+		 * called with interrupts disabled.
+		 */
+		local_irq_disable();
+		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
+		local_irq_enable();
+	} else
+		default_idle();
+}
+
+void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
+{
 #ifdef CONFIG_X86_SMP
 	if (pm_idle == poll_idle && smp_num_siblings > 1) {
 		printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
 			" performance may degrade.\n");
 	}
 #endif
+	if (pm_idle)
+		return;
+
 	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
 		/*
-		 * Skip, if setup has overridden idle.
 		 * One CPU supports mwait => All CPUs supports mwait
 		 */
-		if (!pm_idle) {
-			printk(KERN_INFO "using mwait in idle threads.\n");
-			pm_idle = mwait_idle;
-		}
-	}
-	selected = 1;
+		printk(KERN_INFO "using mwait in idle threads.\n");
+		pm_idle = mwait_idle;
+	} else if (check_c1e_idle(c)) {
+		printk(KERN_INFO "using C1E aware idle routine\n");
+		pm_idle = c1e_idle;
+	} else
+		pm_idle = default_idle;
 }
 
 static int __init idle_setup(char *str)
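The new mwait_usable() above keys off CPUID leaf 0x05, the MONITOR/MWAIT leaf: ECX bit 0 reports whether EDX enumerates MWAIT sub-states at all, and EDX bits 7:4 give the number of C1 sub-states reachable through MWAIT, which is what the 0xf0 mask (MWAIT_EDX_C1) tests. A rough user-space equivalent, using GCC's <cpuid.h> helpers rather than the kernel's cpuid() (illustrative only; check that your toolchain provides these helpers):

/* Query CPUID leaf 5 the way mwait_usable() does, from user space. */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Highest supported standard CPUID leaf. */
	if (__get_cpuid_max(0, NULL) < 0x05) {
		puts("no MONITOR/MWAIT leaf");
		return 1;
	}

	__cpuid(0x05, eax, ebx, ecx, edx);

	if (!(ecx & 0x01)) {
		/* No extended enumeration in EDX: the patch assumes C1 works. */
		puts("MWAIT extensions not enumerated");
		return 0;
	}

	/* EDX[7:4]: number of C1 sub-states supported via MWAIT (mask 0xf0). */
	printf("C1 MWAIT sub-states: %u\n", (edx >> 4) & 0xf);
	return 0;
}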
@@ -169,7 +331,27 @@ static int __init idle_setup(char *str)
 		pm_idle = poll_idle;
 	} else if (!strcmp(str, "mwait"))
 		force_mwait = 1;
-	else
+	else if (!strcmp(str, "halt")) {
+		/*
+		 * When the boot option of idle=halt is added, halt is
+		 * forced to be used for CPU idle. In such case CPU C2/C3
+		 * won't be used again.
+		 * To continue to load the CPU idle driver, don't touch
+		 * the boot_option_idle_override.
+		 */
+		pm_idle = default_idle;
+		idle_halt = 1;
+		return 0;
+	} else if (!strcmp(str, "nomwait")) {
+		/*
+		 * If the boot option of "idle=nomwait" is added,
+		 * it means that mwait will be disabled for CPU C2/C3
+		 * states. In such case it won't touch the variable
+		 * of boot_option_idle_override.
+		 */
+		idle_nomwait = 1;
+		return 0;
+	} else
 		return -1;
 
 	boot_option_idle_override = 1;