author      Linus Torvalds <torvalds@linux-foundation.org>  2012-03-29 17:28:26 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>  2012-03-29 17:28:26 -0400
commit      6b8212a313dae341ef3a2e413dfec5c4dea59617
tree        bbca09d88f61f999c7714fe82710bdfe6ee0e98b  /arch/x86/kernel/process.c
parent      bcd550745fc54f789c14e7526e0633222c505faa
parent      8abc3122aa02567bfe626cd13f4d34853c9b1225
Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 updates from Ingo Molnar.
This touches some non-x86 files due to the sanitized INLINE_SPIN_UNLOCK
config usage.
Fixed up trivial conflicts due to just header include changes (removing
headers due to cpu_idle() merge clashing with the <asm/system.h> split).
* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/apic/amd: Be more verbose about LVT offset assignments
x86, tls: Off by one limit check
x86/ioapic: Add io_apic_ops driver layer to allow interception
x86/olpc: Add debugfs interface for EC commands
x86: Merge the x86_32 and x86_64 cpu_idle() functions
x86/kconfig: Remove CONFIG_TR=y from the defconfigs
x86: Stop recursive fault in print_context_stack after stack overflow
x86/io_apic: Move and reenable irq only when CONFIG_GENERIC_PENDING_IRQ=y
x86/apic: Add separate apic_id_valid() functions for selected apic drivers
locking/kconfig: Simplify INLINE_SPIN_UNLOCK usage
x86/kconfig: Update defconfigs
x86: Fix excessive MSR print out when show_msr is not specified
Diffstat (limited to 'arch/x86/kernel/process.c')
-rw-r--r--  arch/x86/kernel/process.c | 114
1 file changed, 114 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 9b24f36eb55f..a33afaa5ddb7 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -12,6 +12,9 @@
 #include <linux/user-return-notifier.h>
 #include <linux/dmi.h>
 #include <linux/utsname.h>
+#include <linux/stackprotector.h>
+#include <linux/tick.h>
+#include <linux/cpuidle.h>
 #include <trace/events/power.h>
 #include <linux/hw_breakpoint.h>
 #include <asm/cpu.h>
@@ -22,6 +25,24 @@
 #include <asm/i387.h>
 #include <asm/fpu-internal.h>
 #include <asm/debugreg.h>
+#include <asm/nmi.h>
+
+#ifdef CONFIG_X86_64
+static DEFINE_PER_CPU(unsigned char, is_idle);
+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
+
+void idle_notifier_register(struct notifier_block *n)
+{
+	atomic_notifier_chain_register(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_register);
+
+void idle_notifier_unregister(struct notifier_block *n)
+{
+	atomic_notifier_chain_unregister(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_unregister);
+#endif
 
 struct kmem_cache *task_xstate_cachep;
 EXPORT_SYMBOL_GPL(task_xstate_cachep);
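
The two exported helpers above are the whole public surface of the idle notifier chain: a consumer fills in a struct notifier_block and its callback fires with IDLE_START/IDLE_END (the constants come from <asm/idle.h> on x86-64 in kernels of this vintage). As a rough illustration only, a minimal sketch of a consumer module might look like this; the names my_idle_nb and my_idle_notify are hypothetical, not part of the commit:

/* Sketch of an x86-64 idle-notifier consumer (illustrative names). */
#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/idle.h>

static int my_idle_notify(struct notifier_block *nb,
			  unsigned long action, void *data)
{
	switch (action) {
	case IDLE_START:
		/* this CPU is about to enter its low-power idle routine */
		break;
	case IDLE_END:
		/* an interrupt (or reschedule) has ended the idle period */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_idle_nb = {
	.notifier_call = my_idle_notify,
};

static int __init my_init(void)
{
	idle_notifier_register(&my_idle_nb);
	return 0;
}

static void __exit my_exit(void)
{
	idle_notifier_unregister(&my_idle_nb);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

Because the chain is an atomic notifier, the callback runs in the idle path with interrupts disabled and must not sleep.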
@@ -370,6 +391,99 @@ static inline int hlt_use_halt(void)
 }
 #endif
 
+#ifndef CONFIG_SMP
+static inline void play_dead(void)
+{
+	BUG();
+}
+#endif
+
+#ifdef CONFIG_X86_64
+void enter_idle(void)
+{
+	percpu_write(is_idle, 1);
+	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
+}
+
+static void __exit_idle(void)
+{
+	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
+		return;
+	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
+}
+
+/* Called from interrupts to signify idle end */
+void exit_idle(void)
+{
+	/* idle loop has pid 0 */
+	if (current->pid)
+		return;
+	__exit_idle();
+}
+#endif
+
+/*
+ * The idle thread. There's no useful work to be
+ * done, so just try to conserve power and have a
+ * low exit latency (ie sit in a loop waiting for
+ * somebody to say that they'd like to reschedule)
+ */
+void cpu_idle(void)
+{
+	/*
+	 * If we're the non-boot CPU, nothing set the stack canary up
+	 * for us. CPU0 already has it initialized but no harm in
+	 * doing it again. This is a good place for updating it, as
+	 * we wont ever return from this function (so the invalid
+	 * canaries already on the stack wont ever trigger).
+	 */
+	boot_init_stack_canary();
+	current_thread_info()->status |= TS_POLLING;
+
+	while (1) {
+		tick_nohz_idle_enter();
+
+		while (!need_resched()) {
+			rmb();
+
+			if (cpu_is_offline(smp_processor_id()))
+				play_dead();
+
+			/*
+			 * Idle routines should keep interrupts disabled
+			 * from here on, until they go to idle.
+			 * Otherwise, idle callbacks can misfire.
+			 */
+			local_touch_nmi();
+			local_irq_disable();
+
+			enter_idle();
+
+			/* Don't trace irqs off for idle */
+			stop_critical_timings();
+
+			/* enter_idle() needs rcu for notifiers */
+			rcu_idle_enter();
+
+			if (cpuidle_idle_call())
+				pm_idle();
+
+			rcu_idle_exit();
+			start_critical_timings();
+
+			/* In many cases the interrupt that ended idle
+			   has already called exit_idle. But some idle
+			   loops can be woken up without interrupt. */
+			__exit_idle();
+		}
+
+		tick_nohz_idle_exit();
+		preempt_enable_no_resched();
+		schedule();
+		preempt_disable();
+	}
+}
+
 /*
  * We use this if we don't have any better
  * idle routine..
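
A subtlety in the loop above: exit_idle() can be called both from the interrupt that ends the idle period and again from the loop itself, so __exit_idle() does a test-and-clear on the per-cpu is_idle byte, guaranteeing the IDLE_END notifiers run at most once per enter_idle(). A standalone sketch of that idempotency pattern, with C11 atomics standing in for the per-cpu bit op and all names hypothetical:

/* Sketch of the enter/exit pairing (plain C11, not kernel code). */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int is_idle;

static void enter_idle_sketch(void)
{
	atomic_store(&is_idle, 1);
	puts("IDLE_START notifiers run here");
}

static void exit_idle_sketch(void)
{
	/* test-and-clear: only the first caller per idle period fires */
	if (atomic_exchange(&is_idle, 0) == 0)
		return;
	puts("IDLE_END notifiers run here");
}

int main(void)
{
	enter_idle_sketch();
	exit_idle_sketch();	/* fires IDLE_END */
	exit_idle_sketch();	/* no-op: flag already cleared */
	return 0;
}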