Diffstat (limited to 'arch/x86/kernel/process_64.c')

-rw-r--r--  arch/x86/kernel/process_64.c  123
1 file changed, 3 insertions, 120 deletions
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 131c2ee7ac56..e2319f39988b 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -106,26 +106,13 @@ void default_idle(void)
         * test NEED_RESCHED:
         */
        smp_mb();
-       local_irq_disable();
-       if (!need_resched()) {
+       if (!need_resched())
                safe_halt();    /* enables interrupts racelessly */
-               local_irq_disable();
-       }
-       local_irq_enable();
+       else
+               local_irq_enable();
        current_thread_info()->status |= TS_POLLING;
 }
 
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle(void)
-{
-       local_irq_enable();
-       cpu_relax();
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 DECLARE_PER_CPU(int, cpu_state);
 
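The first hunk simplifies default_idle(): cpu_idle() now enters it with interrupts already disabled, so the function only needs to re-check need_resched() after the barrier and then call safe_halt(), which issues "sti; hlt" atomically so a wakeup cannot slip in between enabling interrupts and halting. The hunk also deletes poll_idle(), the spin-instead-of-halt variant. Below is a minimal userspace analogue of the deleted poll_idle() loop; every name in it (need_resched_flag, idle_thread) is illustrative, not kernel API, and _mm_pause() stands in for cpu_relax().

/* poll_idle_demo.c - userspace sketch of the removed poll_idle() pattern.
 * Build: cc -O2 -pthread poll_idle_demo.c (x86 only, for _mm_pause).
 */
#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>
#include <unistd.h>
#include <immintrin.h>          /* _mm_pause(), the userspace cpu_relax() */

static atomic_int need_resched_flag;    /* stands in for TIF_NEED_RESCHED */

static void *idle_thread(void *unused)
{
        (void)unused;
        /* Like poll_idle(): burn cycles re-checking the flag instead of
         * halting. Fast to wake (no IPI needed), but power-hungry, as the
         * removed comment warned. */
        while (!atomic_load_explicit(&need_resched_flag, memory_order_acquire))
                _mm_pause();    /* hint to the core that we are spinning */
        puts("idle thread: work became pending, leaving idle loop");
        return NULL;
}

int main(void)
{
        pthread_t tid;
        pthread_create(&tid, NULL, idle_thread, NULL);
        sleep(1);               /* let the idle thread spin for a while */
        atomic_store_explicit(&need_resched_flag, 1, memory_order_release);
        pthread_join(tid, NULL);
        return 0;
}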
@@ -192,110 +179,6 @@ void cpu_idle(void)
        }
 }
 
-static void do_nothing(void *unused)
-{
-}
-
-/*
- * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
- * pm_idle and update to new pm_idle value. Required while changing pm_idle
- * handler on SMP systems.
- *
- * Caller must have changed pm_idle to the new value before the call. Old
- * pm_idle value will not be used by any CPU after the return of this function.
- */
-void cpu_idle_wait(void)
-{
-       smp_mb();
-       /* kick all the CPUs so that they exit out of pm_idle */
-       smp_call_function(do_nothing, NULL, 0, 1);
-}
-EXPORT_SYMBOL_GPL(cpu_idle_wait);
-
-/*
- * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
- * which can obviate IPI to trigger checking of need_resched.
- * We execute MONITOR against need_resched and enter optimized wait state
- * through MWAIT. Whenever someone changes need_resched, we would be woken
- * up from MWAIT (without an IPI).
- *
- * New with Core Duo processors, MWAIT can take some hints based on CPU
- * capability.
- */
-void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
-{
-       if (!need_resched()) {
-               __monitor((void *)&current_thread_info()->flags, 0, 0);
-               smp_mb();
-               if (!need_resched())
-                       __mwait(ax, cx);
-       }
-}
-
-/* Default MONITOR/MWAIT with no hints, used for default C1 state */
-static void mwait_idle(void)
-{
-       if (!need_resched()) {
-               __monitor((void *)&current_thread_info()->flags, 0, 0);
-               smp_mb();
-               if (!need_resched())
-                       __sti_mwait(0, 0);
-               else
-                       local_irq_enable();
-       } else {
-               local_irq_enable();
-       }
-}
-
-
-static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
-{
-       if (force_mwait)
-               return 1;
-       /* Any C1 states supported? */
-       return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
-}
-
-void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
-{
-       static int selected;
-
-       if (selected)
-               return;
-#ifdef CONFIG_X86_SMP
-       if (pm_idle == poll_idle && smp_num_siblings > 1) {
-               printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
-                       " performance may degrade.\n");
-       }
-#endif
-       if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
-               /*
-                * Skip, if setup has overridden idle.
-                * One CPU supports mwait => All CPUs supports mwait
-                */
-               if (!pm_idle) {
-                       printk(KERN_INFO "using mwait in idle threads.\n");
-                       pm_idle = mwait_idle;
-               }
-       }
-       selected = 1;
-}
-
-static int __init idle_setup(char *str)
-{
-       if (!strcmp(str, "poll")) {
-               printk("using polling idle threads.\n");
-               pm_idle = poll_idle;
-       } else if (!strcmp(str, "mwait"))
-               force_mwait = 1;
-       else
-               return -1;
-
-       boot_option_idle_override = 1;
-       return 0;
-}
-early_param("idle", idle_setup);
-
 /* Prints also some state that isn't saved in the pt_regs */
 void __show_regs(struct pt_regs * regs)
 {
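The second hunk removes the MONITOR/MWAIT idle machinery (the 64-bit copy; a shared version lives elsewhere after this change). The key gate was mwait_usable(): MWAIT may only back the default C1 idle state if CPUID leaf 5 is implemented and its EDX[7:4] field reports at least one C1 sub-state, which is exactly what `(cpuid_edx(5) >> 4) & 0xf` tests. The standalone sketch below runs the same check from userspace; it assumes the GCC/Clang <cpuid.h> helpers and is only an illustration of the CPUID query, not kernel code.

/* mwait_check.c - userspace version of the removed mwait_usable() test.
 * Build: cc -O2 mwait_check.c (x86 with GCC or Clang, which ship <cpuid.h>).
 */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* c->cpuid_level >= 5: is the MONITOR/MWAIT leaf implemented? */
        if (__get_cpuid_max(0, NULL) < 5) {
                puts("CPUID leaf 5 not available: mwait_usable() would return 0");
                return 0;
        }

        __cpuid(5, eax, ebx, ecx, edx);

        /* (cpuid_edx(5) >> 4) & 0xf: count of C1 sub-states reachable via
         * MWAIT; any non-zero count made mwait_usable() return true
         * (unless idle=mwait on the command line had already forced it). */
        unsigned int c1 = (edx >> 4) & 0xf;
        printf("C1 MWAIT sub-states: %u -> mwait_usable() would return %d\n",
               c1, c1 > 0);
        return 0;
}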