Diffstat (limited to 'arch/x86/kernel/process.c')
-rw-r--r--	arch/x86/kernel/process.c	150
1 file changed, 120 insertions(+), 30 deletions(-)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 15763af7bfe3..1d92a5ab6e8b 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -12,16 +12,37 @@
 #include <linux/user-return-notifier.h>
 #include <linux/dmi.h>
 #include <linux/utsname.h>
+#include <linux/stackprotector.h>
+#include <linux/tick.h>
+#include <linux/cpuidle.h>
 #include <trace/events/power.h>
 #include <linux/hw_breakpoint.h>
 #include <asm/cpu.h>
-#include <asm/system.h>
 #include <asm/apic.h>
 #include <asm/syscalls.h>
 #include <asm/idle.h>
 #include <asm/uaccess.h>
 #include <asm/i387.h>
+#include <asm/fpu-internal.h>
 #include <asm/debugreg.h>
+#include <asm/nmi.h>
+
+#ifdef CONFIG_X86_64
+static DEFINE_PER_CPU(unsigned char, is_idle);
+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
+
+void idle_notifier_register(struct notifier_block *n)
+{
+	atomic_notifier_chain_register(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_register);
+
+void idle_notifier_unregister(struct notifier_block *n)
+{
+	atomic_notifier_chain_unregister(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_unregister);
+#endif
 
 struct kmem_cache *task_xstate_cachep;
 EXPORT_SYMBOL_GPL(task_xstate_cachep);
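
The hunk above adds a small notifier API, idle_notifier_register()/idle_notifier_unregister(), whose chain is fired with IDLE_START and IDLE_END around the 64-bit idle loop (see enter_idle()/__exit_idle() in the next hunk). As a rough illustration only, a consumer could hook it along the following lines; example_idle_notify and example_idle_nb are invented names, and the declarations of idle_notifier_register() and IDLE_START/IDLE_END are assumed to come from <asm/idle.h> as in kernels of this era:

#include <linux/notifier.h>
#include <asm/idle.h>	/* idle_notifier_register(), IDLE_START/IDLE_END (assumed location) */

/* Hypothetical callback: runs from the idle loop's atomic notifier chain. */
static int example_idle_notify(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	switch (action) {
	case IDLE_START:
		/* CPU is about to go idle (interrupts are disabled here). */
		break;
	case IDLE_END:
		/* CPU has left the idle state. */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_idle_nb = {
	.notifier_call = example_idle_notify,
};

/* From driver/module init (x86-64 only, per the #ifdef above): */
/*	idle_notifier_register(&example_idle_nb); */
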
@@ -341,44 +362,113 @@ void (*pm_idle)(void);
 EXPORT_SYMBOL(pm_idle);
 #endif
 
-#ifdef CONFIG_X86_32
-/*
- * This halt magic was a workaround for ancient floppy DMA
- * wreckage. It should be safe to remove.
- */
-static int hlt_counter;
-void disable_hlt(void)
+static inline int hlt_use_halt(void)
 {
-	hlt_counter++;
+	return 1;
 }
-EXPORT_SYMBOL(disable_hlt);
 
-void enable_hlt(void)
+#ifndef CONFIG_SMP
+static inline void play_dead(void)
 {
-	hlt_counter--;
+	BUG();
 }
-EXPORT_SYMBOL(enable_hlt);
+#endif
 
-static inline int hlt_use_halt(void)
+#ifdef CONFIG_X86_64
+void enter_idle(void)
 {
-	return (!hlt_counter && boot_cpu_data.hlt_works_ok);
+	percpu_write(is_idle, 1);
+	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
 }
-#else
-static inline int hlt_use_halt(void)
+
+static void __exit_idle(void)
 {
-	return 1;
+	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
+		return;
+	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
+}
+
+/* Called from interrupts to signify idle end */
+void exit_idle(void)
+{
+	/* idle loop has pid 0 */
+	if (current->pid)
+		return;
+	__exit_idle();
 }
 #endif
 
 /*
+ * The idle thread. There's no useful work to be
+ * done, so just try to conserve power and have a
+ * low exit latency (ie sit in a loop waiting for
+ * somebody to say that they'd like to reschedule)
+ */
+void cpu_idle(void)
+{
+	/*
+	 * If we're the non-boot CPU, nothing set the stack canary up
+	 * for us. CPU0 already has it initialized but no harm in
+	 * doing it again. This is a good place for updating it, as
+	 * we wont ever return from this function (so the invalid
+	 * canaries already on the stack wont ever trigger).
+	 */
+	boot_init_stack_canary();
+	current_thread_info()->status |= TS_POLLING;
+
+	while (1) {
+		tick_nohz_idle_enter();
+
+		while (!need_resched()) {
+			rmb();
+
+			if (cpu_is_offline(smp_processor_id()))
+				play_dead();
+
+			/*
+			 * Idle routines should keep interrupts disabled
+			 * from here on, until they go to idle.
+			 * Otherwise, idle callbacks can misfire.
+			 */
+			local_touch_nmi();
+			local_irq_disable();
+
+			enter_idle();
+
+			/* Don't trace irqs off for idle */
+			stop_critical_timings();
+
+			/* enter_idle() needs rcu for notifiers */
+			rcu_idle_enter();
+
+			if (cpuidle_idle_call())
+				pm_idle();
+
+			rcu_idle_exit();
+			start_critical_timings();
+
+			/* In many cases the interrupt that ended idle
+			   has already called exit_idle. But some idle
+			   loops can be woken up without interrupt. */
+			__exit_idle();
+		}
+
+		tick_nohz_idle_exit();
+		preempt_enable_no_resched();
+		schedule();
+		preempt_disable();
+	}
+}
+
+/*
  * We use this if we don't have any better
  * idle routine..
  */
 void default_idle(void)
 {
 	if (hlt_use_halt()) {
-		trace_power_start(POWER_CSTATE, 1, smp_processor_id());
-		trace_cpu_idle(1, smp_processor_id());
+		trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
+		trace_cpu_idle_rcuidle(1, smp_processor_id());
 		current_thread_info()->status &= ~TS_POLLING;
 		/*
 		 * TS_POLLING-cleared state must be visible before we
@@ -391,8 +481,8 @@ void default_idle(void)
 		else
 			local_irq_enable();
 		current_thread_info()->status |= TS_POLLING;
-		trace_power_end(smp_processor_id());
-		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+		trace_power_end_rcuidle(smp_processor_id());
+		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 	} else {
 		local_irq_enable();
 		/* loop is done by the caller */
@@ -450,8 +540,8 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
 static void mwait_idle(void)
 {
 	if (!need_resched()) {
-		trace_power_start(POWER_CSTATE, 1, smp_processor_id());
-		trace_cpu_idle(1, smp_processor_id());
+		trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
+		trace_cpu_idle_rcuidle(1, smp_processor_id());
 		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
 			clflush((void *)&current_thread_info()->flags);
 
@@ -461,8 +551,8 @@ static void mwait_idle(void)
 			__sti_mwait(0, 0);
 		else
 			local_irq_enable();
-		trace_power_end(smp_processor_id());
-		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+		trace_power_end_rcuidle(smp_processor_id());
+		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 	} else
 		local_irq_enable();
 }
@@ -474,13 +564,13 @@ static void mwait_idle(void)
  */
 static void poll_idle(void)
 {
-	trace_power_start(POWER_CSTATE, 0, smp_processor_id());
-	trace_cpu_idle(0, smp_processor_id());
+	trace_power_start_rcuidle(POWER_CSTATE, 0, smp_processor_id());
+	trace_cpu_idle_rcuidle(0, smp_processor_id());
 	local_irq_enable();
 	while (!need_resched())
 		cpu_relax();
-	trace_power_end(smp_processor_id());
-	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+	trace_power_end_rcuidle(smp_processor_id());
+	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
 /*
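
The remaining hunks mechanically switch the idle routines from trace_power_start()/trace_cpu_idle() and friends to their _rcuidle variants: the merged cpu_idle() loop now brackets pm_idle() with rcu_idle_enter()/rcu_idle_exit(), and ordinary tracepoints must not be used while RCU is not watching. As a sketch only, a hypothetical routine plugged in via pm_idle would follow the same pattern (example_halt_idle is an invented name; per the contract in cpu_idle(), it is entered with interrupts disabled and is expected to re-enable them):

/* Sketch only: an idle routine matching the contract cpu_idle() expects. */
static void example_halt_idle(void)
{
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	safe_halt();	/* sti; hlt -- re-enables interrupts, then halts until the next interrupt */
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}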