author		Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2013-02-18 16:34:11 -0500
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2013-02-18 16:34:11 -0500
commit		10baf04e95fbf7eb6089410220a547211dd2ffa7 (patch)
tree		912204612987a3ce2ec0ed214d47911040d79cc1 /arch
parent		fdbe0946d4c35d4cc784cfe0a5322708cfb7ade8 (diff)
parent		ca62cf59ceef10ff2ebca0e7f764507186870270 (diff)
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux: (35 commits)
  PM idle: remove global declaration of pm_idle
  unicore32 idle: delete stray pm_idle comment
  openrisc idle: delete pm_idle
  mn10300 idle: delete pm_idle
  microblaze idle: delete pm_idle
  m32r idle: delete pm_idle, and other dead idle code
  ia64 idle: delete pm_idle
  cris idle: delete idle and pm_idle
  ARM64 idle: delete pm_idle
  ARM idle: delete pm_idle
  blackfin idle: delete pm_idle
  sparc idle: rename pm_idle to sparc_idle
  sh idle: rename global pm_idle to static sh_idle
  x86 idle: rename global pm_idle to static x86_idle
  APM idle: register apm_cpu_idle via cpuidle
  tools/power turbostat: display SMI count by default
  intel_idle: export both C1 and C1E
  cpuidle: remove vestage definition of cpuidle_state_usage.driver_data
  x86 idle: remove 32-bit-only "no-hlt" parameter, hlt_works_ok flag
  x86 idle: remove mwait_idle() and "idle=mwait" cmdline param
  ...

Conflicts:
	arch/x86/kernel/process.c (with PM / tracing commit 43720bd)
	drivers/acpi/processor_idle.c (with ACPICA commit 4f84291)
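The common thread across these commits: instead of a global, writable pm_idle hook that any module could silently repoint, each architecture now either calls default_idle() directly or keeps a file-local pointer (x86_idle, sh_idle, sparc_idle), and modules such as APM register through cpuidle instead. A rough before/after sketch (illustrative pseudo-kernel C, not part of the patch; names follow the code below):

	/* before: any module could repoint the global hook */
	void (*pm_idle)(void);

	void cpu_idle_before(void)
	{
		while (!need_resched())
			(pm_idle ? pm_idle : default_idle)();
	}

	/* after: the pointer is static or gone entirely, and the
	 * idle loop calls default_idle() (or cpuidle) itself */
	void cpu_idle_after(void)
	{
		while (!need_resched())
			default_idle();
	}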
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/kernel/process.c             |  13
-rw-r--r--  arch/arm/mach-davinci/cpuidle.c       |  84
-rw-r--r--  arch/arm64/kernel/process.c           |  13
-rw-r--r--  arch/blackfin/kernel/process.c        |   7
-rw-r--r--  arch/cris/kernel/process.c            |  11
-rw-r--r--  arch/ia64/kernel/process.c            |   3
-rw-r--r--  arch/ia64/kernel/setup.c              |   1
-rw-r--r--  arch/m32r/kernel/process.c            |  51
-rw-r--r--  arch/microblaze/kernel/process.c      |   3
-rw-r--r--  arch/mn10300/kernel/process.c         |   7
-rw-r--r--  arch/openrisc/kernel/idle.c           |   5
-rw-r--r--  arch/sh/kernel/idle.c                 |  12
-rw-r--r--  arch/sparc/include/asm/processor_32.h |   1
-rw-r--r--  arch/sparc/kernel/apc.c               |   3
-rw-r--r--  arch/sparc/kernel/leon_pmc.c          |   5
-rw-r--r--  arch/sparc/kernel/pmc.c               |   3
-rw-r--r--  arch/sparc/kernel/process_32.c        |   7
-rw-r--r--  arch/unicore32/kernel/process.c       |   5
-rw-r--r--  arch/x86/Kconfig                      |   1
-rw-r--r--  arch/x86/include/asm/mwait.h          |   3
-rw-r--r--  arch/x86/include/asm/processor.h      |  18
-rw-r--r--  arch/x86/include/uapi/asm/msr-index.h |   3
-rw-r--r--  arch/x86/kernel/apm_32.c              |  57
-rw-r--r--  arch/x86/kernel/cpu/bugs.c            |  27
-rw-r--r--  arch/x86/kernel/cpu/proc.c            |   2
-rw-r--r--  arch/x86/kernel/process.c             | 116
-rw-r--r--  arch/x86/kernel/smpboot.c             |   2
-rw-r--r--  arch/x86/xen/setup.c                  |   5
28 files changed, 118 insertions(+), 350 deletions(-)
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index c6dec5fc20aa..047d3e40e470 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -172,14 +172,9 @@ static void default_idle(void)
 	local_irq_enable();
 }
 
-void (*pm_idle)(void) = default_idle;
-EXPORT_SYMBOL(pm_idle);
-
 /*
- * The idle thread, has rather strange semantics for calling pm_idle,
- * but this is what x86 does and we need to do the same, so that
- * things like cpuidle get called in the same way. The only difference
- * is that we always respect 'hlt_counter' to prevent low power idle.
+ * The idle thread.
+ * We always respect 'hlt_counter' to prevent low power idle.
  */
 void cpu_idle(void)
 {
@@ -210,10 +205,10 @@ void cpu_idle(void)
 		} else if (!need_resched()) {
 			stop_critical_timings();
 			if (cpuidle_idle_call())
-				pm_idle();
+				default_idle();
 			start_critical_timings();
 			/*
-			 * pm_idle functions must always
+			 * default_idle functions must always
 			 * return with IRQs enabled.
 			 */
 			WARN_ON(irqs_disabled());
diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c
index 9107691adbdb..5ac9e9384b15 100644
--- a/arch/arm/mach-davinci/cpuidle.c
+++ b/arch/arm/mach-davinci/cpuidle.c
@@ -25,35 +25,44 @@
 
 #define DAVINCI_CPUIDLE_MAX_STATES	2
 
-struct davinci_ops {
-	void (*enter) (u32 flags);
-	void (*exit) (u32 flags);
-	u32 flags;
-};
+static DEFINE_PER_CPU(struct cpuidle_device, davinci_cpuidle_device);
+static void __iomem *ddr2_reg_base;
+static bool ddr2_pdown;
+
+static void davinci_save_ddr_power(int enter, bool pdown)
+{
+	u32 val;
+
+	val = __raw_readl(ddr2_reg_base + DDR2_SDRCR_OFFSET);
+
+	if (enter) {
+		if (pdown)
+			val |= DDR2_SRPD_BIT;
+		else
+			val &= ~DDR2_SRPD_BIT;
+		val |= DDR2_LPMODEN_BIT;
+	} else {
+		val &= ~(DDR2_SRPD_BIT | DDR2_LPMODEN_BIT);
+	}
+
+	__raw_writel(val, ddr2_reg_base + DDR2_SDRCR_OFFSET);
+}
 
 /* Actual code that puts the SoC in different idle states */
 static int davinci_enter_idle(struct cpuidle_device *dev,
 				struct cpuidle_driver *drv,
 						int index)
 {
-	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
-	struct davinci_ops *ops = cpuidle_get_statedata(state_usage);
-
-	if (ops && ops->enter)
-		ops->enter(ops->flags);
+	davinci_save_ddr_power(1, ddr2_pdown);
 
 	index = cpuidle_wrap_enter(dev, drv, index,
 				arm_cpuidle_simple_enter);
 
-	if (ops && ops->exit)
-		ops->exit(ops->flags);
+	davinci_save_ddr_power(0, ddr2_pdown);
 
 	return index;
 }
 
-/* fields in davinci_ops.flags */
-#define DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN BIT(0)
-
 static struct cpuidle_driver davinci_idle_driver = {
 	.name			= "cpuidle-davinci",
 	.owner			= THIS_MODULE,
@@ -70,45 +79,6 @@ static struct cpuidle_driver davinci_idle_driver = {
 	.state_count		= DAVINCI_CPUIDLE_MAX_STATES,
 };
 
-static DEFINE_PER_CPU(struct cpuidle_device, davinci_cpuidle_device);
-static void __iomem *ddr2_reg_base;
-
-static void davinci_save_ddr_power(int enter, bool pdown)
-{
-	u32 val;
-
-	val = __raw_readl(ddr2_reg_base + DDR2_SDRCR_OFFSET);
-
-	if (enter) {
-		if (pdown)
-			val |= DDR2_SRPD_BIT;
-		else
-			val &= ~DDR2_SRPD_BIT;
-		val |= DDR2_LPMODEN_BIT;
-	} else {
-		val &= ~(DDR2_SRPD_BIT | DDR2_LPMODEN_BIT);
-	}
-
-	__raw_writel(val, ddr2_reg_base + DDR2_SDRCR_OFFSET);
-}
-
-static void davinci_c2state_enter(u32 flags)
-{
-	davinci_save_ddr_power(1, !!(flags & DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN));
-}
-
-static void davinci_c2state_exit(u32 flags)
-{
-	davinci_save_ddr_power(0, !!(flags & DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN));
-}
-
-static struct davinci_ops davinci_states[DAVINCI_CPUIDLE_MAX_STATES] = {
-	[1] = {
-		.enter	= davinci_c2state_enter,
-		.exit	= davinci_c2state_exit,
-	},
-};
-
 static int __init davinci_cpuidle_probe(struct platform_device *pdev)
 {
 	int ret;
@@ -124,11 +94,7 @@ static int __init davinci_cpuidle_probe(struct platform_device *pdev)
 
 	ddr2_reg_base = pdata->ddr2_ctlr_base;
 
-	if (pdata->ddr2_pdown)
-		davinci_states[1].flags |= DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN;
-	cpuidle_set_statedata(&device->states_usage[1], &davinci_states[1]);
-
-	device->state_count = DAVINCI_CPUIDLE_MAX_STATES;
+	ddr2_pdown = pdata->ddr2_pdown;
 
 	ret = cpuidle_register_driver(&davinci_idle_driver);
 	if (ret) {
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index cb0956bc96ed..c7002d40a9b0 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -97,14 +97,9 @@ static void default_idle(void)
 	local_irq_enable();
 }
 
-void (*pm_idle)(void) = default_idle;
-EXPORT_SYMBOL_GPL(pm_idle);
-
 /*
- * The idle thread, has rather strange semantics for calling pm_idle,
- * but this is what x86 does and we need to do the same, so that
- * things like cpuidle get called in the same way. The only difference
- * is that we always respect 'hlt_counter' to prevent low power idle.
+ * The idle thread.
+ * We always respect 'hlt_counter' to prevent low power idle.
  */
 void cpu_idle(void)
 {
@@ -122,10 +117,10 @@ void cpu_idle(void)
 		local_irq_disable();
 		if (!need_resched()) {
 			stop_critical_timings();
-			pm_idle();
+			default_idle();
 			start_critical_timings();
 			/*
-			 * pm_idle functions should always return
+			 * default_idle functions should always return
 			 * with IRQs enabled.
 			 */
 			WARN_ON(irqs_disabled());
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 3e16ad9b0a99..8061426b7df5 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -39,12 +39,6 @@ int nr_l1stack_tasks;
 void *l1_stack_base;
 unsigned long l1_stack_len;
 
-/*
- * Powermanagement idle function, if any..
- */
-void (*pm_idle)(void) = NULL;
-EXPORT_SYMBOL(pm_idle);
-
 void (*pm_power_off)(void) = NULL;
 EXPORT_SYMBOL(pm_power_off);
 
@@ -81,7 +75,6 @@ void cpu_idle(void)
 {
 	/* endless idle loop with no priority at all */
 	while (1) {
-		void (*idle)(void) = pm_idle;
 
 #ifdef CONFIG_HOTPLUG_CPU
 		if (cpu_is_offline(smp_processor_id()))
diff --git a/arch/cris/kernel/process.c b/arch/cris/kernel/process.c
index 7f65be6f7f17..104ff4dd9b98 100644
--- a/arch/cris/kernel/process.c
+++ b/arch/cris/kernel/process.c
@@ -54,11 +54,6 @@ void enable_hlt(void)
 
 EXPORT_SYMBOL(enable_hlt);
 
-/*
- * The following aren't currently used.
- */
-void (*pm_idle)(void);
-
 extern void default_idle(void);
 
 void (*pm_power_off)(void);
@@ -77,16 +72,12 @@ void cpu_idle (void)
 	while (1) {
 		rcu_idle_enter();
 		while (!need_resched()) {
-			void (*idle)(void);
 			/*
 			 * Mark this as an RCU critical section so that
 			 * synchronize_kernel() in the unload path waits
 			 * for our completion.
 			 */
-			idle = pm_idle;
-			if (!idle)
-				idle = default_idle;
-			idle();
+			default_idle();
 		}
 		rcu_idle_exit();
 		schedule_preempt_disabled();
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 31360cbbd5f8..e34f565f595a 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -57,8 +57,6 @@ void (*ia64_mark_idle)(int);
 
 unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
 EXPORT_SYMBOL(boot_option_idle_override);
-void (*pm_idle) (void);
-EXPORT_SYMBOL(pm_idle);
 void (*pm_power_off) (void);
 EXPORT_SYMBOL(pm_power_off);
 
@@ -301,7 +299,6 @@ cpu_idle (void)
 		if (mark_idle)
 			(*mark_idle)(1);
 
-		idle = pm_idle;
 		if (!idle)
 			idle = default_idle;
 		(*idle)();
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index aaefd9b94f2f..2029cc0d2fc6 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -1051,7 +1051,6 @@ cpu_init (void)
 		max_num_phys_stacked = num_phys_stacked;
 	}
 	platform_cpu_init();
-	pm_idle = default_idle;
}
 
 void __init
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c
index 765d0f57c787..bde899e155d3 100644
--- a/arch/m32r/kernel/process.c
+++ b/arch/m32r/kernel/process.c
@@ -44,36 +44,10 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
 	return tsk->thread.lr;
 }
 
-/*
- * Powermanagement idle function, if any..
- */
-static void (*pm_idle)(void) = NULL;
-
 void (*pm_power_off)(void) = NULL;
 EXPORT_SYMBOL(pm_power_off);
 
 /*
- * We use this is we don't have any better
- * idle routine..
- */
-static void default_idle(void)
-{
-	/* M32R_FIXME: Please use "cpu_sleep" mode. */
-	cpu_relax();
-}
-
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->work.need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle (void)
-{
-	/* M32R_FIXME */
-	cpu_relax();
-}
-
-/*
  * The idle thread. There's no useful work to be
  * done, so just try to conserve power and have a
  * low exit latency (ie sit in a loop waiting for
@@ -84,14 +58,8 @@ void cpu_idle (void)
 	/* endless idle loop with no priority at all */
 	while (1) {
 		rcu_idle_enter();
-		while (!need_resched()) {
-			void (*idle)(void) = pm_idle;
-
-			if (!idle)
-				idle = default_idle;
-
-			idle();
-		}
+		while (!need_resched())
+			cpu_relax();
 		rcu_idle_exit();
 		schedule_preempt_disabled();
 	}
@@ -120,21 +88,6 @@ void machine_power_off(void)
 	/* M32R_FIXME */
 }
 
-static int __init idle_setup (char *str)
-{
-	if (!strncmp(str, "poll", 4)) {
-		printk("using poll in idle threads.\n");
-		pm_idle = poll_idle;
-	} else if (!strncmp(str, "sleep", 4)) {
-		printk("using sleep in idle threads.\n");
-		pm_idle = default_idle;
-	}
-
-	return 1;
-}
-
-__setup("idle=", idle_setup);
-
 void show_regs(struct pt_regs * regs)
 {
 	printk("\n");
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index a5b74f729e5b..6ff2dcff3410 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -41,7 +41,6 @@ void show_regs(struct pt_regs *regs)
 		regs->msr, regs->ear, regs->esr, regs->fsr);
 }
 
-void (*pm_idle)(void);
 void (*pm_power_off)(void) = NULL;
 EXPORT_SYMBOL(pm_power_off);
 
@@ -98,8 +97,6 @@ void cpu_idle(void)
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		void (*idle)(void) = pm_idle;
-
 		if (!idle)
 			idle = default_idle;
 
diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c
index eb09f5a552ff..84f4e97e3074 100644
--- a/arch/mn10300/kernel/process.c
+++ b/arch/mn10300/kernel/process.c
@@ -37,12 +37,6 @@
 #include "internal.h"
 
 /*
- * power management idle function, if any..
- */
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
-
-/*
  * return saved PC of a blocked thread.
  */
 unsigned long thread_saved_pc(struct task_struct *tsk)
@@ -113,7 +107,6 @@ void cpu_idle(void)
 			void (*idle)(void);
 
 			smp_rmb();
-			idle = pm_idle;
 			if (!idle) {
 #if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
 				idle = poll_idle;
diff --git a/arch/openrisc/kernel/idle.c b/arch/openrisc/kernel/idle.c
index 7d618feb1b72..5e8a3b6d6bc6 100644
--- a/arch/openrisc/kernel/idle.c
+++ b/arch/openrisc/kernel/idle.c
@@ -39,11 +39,6 @@
 
 void (*powersave) (void) = NULL;
 
-static inline void pm_idle(void)
-{
-	barrier();
-}
-
 void cpu_idle(void)
 {
 	set_thread_flag(TIF_POLLING_NRFLAG);
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 0c910163caa3..3d5a1b387cc0 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -22,7 +22,7 @@
 #include <asm/smp.h>
 #include <asm/bl_bit.h>
 
-void (*pm_idle)(void);
+static void (*sh_idle)(void);
 
 static int hlt_counter;
 
@@ -103,9 +103,9 @@ void cpu_idle(void)
 			/* Don't trace irqs off for idle */
 			stop_critical_timings();
 			if (cpuidle_idle_call())
-				pm_idle();
+				sh_idle();
 			/*
-			 * Sanity check to ensure that pm_idle() returns
+			 * Sanity check to ensure that sh_idle() returns
 			 * with IRQs enabled
 			 */
 			WARN_ON(irqs_disabled());
@@ -123,13 +123,13 @@ void __init select_idle_routine(void)
 	/*
 	 * If a platform has set its own idle routine, leave it alone.
 	 */
-	if (pm_idle)
+	if (sh_idle)
 		return;
 
 	if (hlt_works())
-		pm_idle = default_idle;
+		sh_idle = default_idle;
 	else
-		pm_idle = poll_idle;
+		sh_idle = poll_idle;
 }
 
 void stop_this_cpu(void *unused)
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h
index c1e01914fd98..2c7baa4c4505 100644
--- a/arch/sparc/include/asm/processor_32.h
+++ b/arch/sparc/include/asm/processor_32.h
@@ -118,6 +118,7 @@ extern unsigned long get_wchan(struct task_struct *);
 extern struct task_struct *last_task_used_math;
 
 #define cpu_relax()	barrier()
+extern void (*sparc_idle)(void);
 
 #endif
 
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c
index 348fa1aeabce..eefda32b595e 100644
--- a/arch/sparc/kernel/apc.c
+++ b/arch/sparc/kernel/apc.c
@@ -20,6 +20,7 @@
 #include <asm/uaccess.h>
 #include <asm/auxio.h>
 #include <asm/apc.h>
+#include <asm/processor.h>
 
 /* Debugging
  *
@@ -158,7 +159,7 @@ static int apc_probe(struct platform_device *op)
 
 	/* Assign power management IDLE handler */
 	if (!apc_no_idle)
-		pm_idle = apc_swift_idle;
+		sparc_idle = apc_swift_idle;
 
 	printk(KERN_INFO "%s: power management initialized%s\n",
 	       APC_DEVNAME, apc_no_idle ? " (CPU idle disabled)" : "");
diff --git a/arch/sparc/kernel/leon_pmc.c b/arch/sparc/kernel/leon_pmc.c
index 4e174321097d..708bca435219 100644
--- a/arch/sparc/kernel/leon_pmc.c
+++ b/arch/sparc/kernel/leon_pmc.c
@@ -9,6 +9,7 @@
 #include <asm/leon_amba.h>
 #include <asm/cpu_type.h>
 #include <asm/leon.h>
+#include <asm/processor.h>
 
 /* List of Systems that need fixup instructions around power-down instruction */
 unsigned int pmc_leon_fixup_ids[] = {
@@ -69,9 +70,9 @@ static int __init leon_pmc_install(void)
 	if (sparc_cpu_model == sparc_leon) {
 		/* Assign power management IDLE handler */
 		if (pmc_leon_need_fixup())
-			pm_idle = pmc_leon_idle_fixup;
+			sparc_idle = pmc_leon_idle_fixup;
 		else
-			pm_idle = pmc_leon_idle;
+			sparc_idle = pmc_leon_idle;
 
 		printk(KERN_INFO "leon: power management initialized\n");
 	}
diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c
index dcbb62f63068..8b7297faca79 100644
--- a/arch/sparc/kernel/pmc.c
+++ b/arch/sparc/kernel/pmc.c
@@ -17,6 +17,7 @@
 #include <asm/oplib.h>
 #include <asm/uaccess.h>
 #include <asm/auxio.h>
+#include <asm/processor.h>
 
 /* Debug
  *
@@ -63,7 +64,7 @@ static int pmc_probe(struct platform_device *op)
 
 #ifndef PMC_NO_IDLE
 	/* Assign power management IDLE handler */
-	pm_idle = pmc_swift_idle;
+	sparc_idle = pmc_swift_idle;
 #endif
 
 	printk(KERN_INFO "%s: power management initialized\n", PMC_DEVNAME);
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index be8e862badaf..62eede13831a 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -43,8 +43,7 @@
  * Power management idle function
  * Set in pm platform drivers (apc.c and pmc.c)
  */
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
+void (*sparc_idle)(void);
 
 /*
  * Power-off handler instantiation for pm.h compliance
@@ -75,8 +74,8 @@ void cpu_idle(void)
 	/* endless idle loop with no priority at all */
 	for (;;) {
 		while (!need_resched()) {
-			if (pm_idle)
-				(*pm_idle)();
+			if (sparc_idle)
+				(*sparc_idle)();
 			else
 				cpu_relax();
 		}
diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c
index 62bad9fed03e..872d7e22d847 100644
--- a/arch/unicore32/kernel/process.c
+++ b/arch/unicore32/kernel/process.c
@@ -45,11 +45,6 @@ static const char * const processor_modes[] = {
 	"UK18", "UK19", "UK1A", "EXTN", "UK1C", "UK1D", "UK1E", "SUSR"
 };
 
-/*
- * The idle thread, has rather strange semantics for calling pm_idle,
- * but this is what x86 does and we need to do the same, so that
- * things like cpuidle get called in the same way.
- */
 void cpu_idle(void)
 {
 	/* endless idle loop with no priority at all */
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 4f7c2da2f9f8..c03309f697f1 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1922,6 +1922,7 @@ config APM_DO_ENABLE
 	  this feature.
 
 config APM_CPU_IDLE
+	depends on CPU_IDLE
 	bool "Make CPU Idle calls when idle"
 	---help---
 	  Enable calls to APM CPU Idle/CPU Busy inside the kernel's idle loop.
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index bcdff997668c..2f366d0ac6b4 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -4,7 +4,8 @@
 #define MWAIT_SUBSTATE_MASK		0xf
 #define MWAIT_CSTATE_MASK		0xf
 #define MWAIT_SUBSTATE_SIZE		4
-#define MWAIT_MAX_NUM_CSTATES		8
+#define MWAIT_HINT2CSTATE(hint)		(((hint) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK)
+#define MWAIT_HINT2SUBSTATE(hint)	((hint) & MWAIT_CSTATE_MASK)
 
 #define CPUID_MWAIT_LEAF		5
 #define CPUID5_ECX_EXTENSIONS_SUPPORTED	0x1
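A quick sanity check of the two new macros (a standalone sketch, not kernel code): an MWAIT hint packs the target C-state field in the high nibble and the sub-state in the low nibble, so a hint of 0x20 decodes to a C-state field of 2, sub-state 0.

	#include <assert.h>

	#define MWAIT_SUBSTATE_MASK	0xf
	#define MWAIT_CSTATE_MASK	0xf
	#define MWAIT_SUBSTATE_SIZE	4
	#define MWAIT_HINT2CSTATE(hint)	(((hint) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK)
	#define MWAIT_HINT2SUBSTATE(hint) ((hint) & MWAIT_CSTATE_MASK)

	int main(void)
	{
		assert(MWAIT_HINT2CSTATE(0x20) == 2);	/* high nibble */
		assert(MWAIT_HINT2SUBSTATE(0x21) == 1);	/* low nibble */
		return 0;
	}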
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 888184b2fc85..b9e7d279f8ef 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -89,7 +89,6 @@ struct cpuinfo_x86 {
 	char			wp_works_ok;	/* It doesn't on 386's */
 
 	/* Problems on some 486Dx4's and old 386's: */
-	char			hlt_works_ok;
 	char			hard_math;
 	char			rfu;
 	char			fdiv_bug;
@@ -165,15 +164,6 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 
 extern const struct seq_operations cpuinfo_op;
 
-static inline int hlt_works(int cpu)
-{
-#ifdef CONFIG_X86_32
-	return cpu_data(cpu).hlt_works_ok;
-#else
-	return 1;
-#endif
-}
-
 #define cache_line_size()	(boot_cpu_data.x86_cache_alignment)
 
 extern void cpu_detect(struct cpuinfo_x86 *c);
@@ -725,7 +715,7 @@ extern unsigned long boot_option_idle_override;
 extern bool amd_e400_c1e_detected;
 
 enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
-			 IDLE_POLL, IDLE_FORCE_MWAIT};
+			 IDLE_POLL};
 
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);
@@ -998,7 +988,11 @@ extern unsigned long arch_align_stack(unsigned long sp);
 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
 
 void default_idle(void);
-bool set_pm_idle_to_default(void);
+#ifdef CONFIG_XEN
+bool xen_set_default_idle(void);
+#else
+#define xen_set_default_idle 0
+#endif
 
 void stop_this_cpu(void *dummy);
 
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index 433a59fb1a74..8d013f5153bc 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -103,6 +103,8 @@
 #define DEBUGCTLMSR_BTS_OFF_USR		(1UL << 10)
 #define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI	(1UL << 11)
 
+#define MSR_IA32_POWER_CTL		0x000001fc
+
 #define MSR_IA32_MC0_CTL		0x00000400
 #define MSR_IA32_MC0_STATUS		0x00000401
 #define MSR_IA32_MC0_ADDR		0x00000402
@@ -272,6 +274,7 @@
 #define MSR_IA32_PLATFORM_ID		0x00000017
 #define MSR_IA32_EBL_CR_POWERON		0x0000002a
 #define MSR_EBC_FREQUENCY_ID		0x0000002c
+#define MSR_SMI_COUNT			0x00000034
 #define MSR_IA32_FEATURE_CONTROL	0x0000003a
 #define MSR_IA32_TSC_ADJUST		0x0000003b
 
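MSR_SMI_COUNT (0x34) is what lets turbostat display the SMI count by default, per the shortlog above. A minimal userspace sketch of reading it the same way (an assumption for illustration: it relies on the msr driver being loaded and root privileges; reading 8 bytes at a file offset equal to the MSR address is the /dev/cpu/N/msr convention):

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	#define MSR_SMI_COUNT 0x00000034

	int main(void)
	{
		uint64_t count;
		int fd = open("/dev/cpu/0/msr", O_RDONLY);

		/* pread at offset == MSR address returns that MSR's value */
		if (fd < 0 || pread(fd, &count, sizeof(count), MSR_SMI_COUNT) != 8) {
			perror("MSR_SMI_COUNT");
			return 1;
		}
		printf("SMI count: %llu\n", (unsigned long long)count);
		close(fd);
		return 0;
	}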
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index d65464e43503..9f4bc6a1164d 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -232,6 +232,7 @@
 #include <linux/acpi.h>
 #include <linux/syscore_ops.h>
 #include <linux/i8253.h>
+#include <linux/cpuidle.h>
 
 #include <asm/uaccess.h>
 #include <asm/desc.h>
@@ -360,13 +361,35 @@ struct apm_user {
  * idle percentage above which bios idle calls are done
  */
 #ifdef CONFIG_APM_CPU_IDLE
-#warning deprecated CONFIG_APM_CPU_IDLE will be deleted in 2012
 #define DEFAULT_IDLE_THRESHOLD	95
 #else
 #define DEFAULT_IDLE_THRESHOLD	100
 #endif
 #define DEFAULT_IDLE_PERIOD	(100 / 3)
 
+static int apm_cpu_idle(struct cpuidle_device *dev,
+			struct cpuidle_driver *drv, int index);
+
+static struct cpuidle_driver apm_idle_driver = {
+	.name = "apm_idle",
+	.owner = THIS_MODULE,
+	.en_core_tk_irqen = 1,
+	.states = {
+		{ /* entry 0 is for polling */ },
+		{ /* entry 1 is for APM idle */
+			.name = "APM",
+			.desc = "APM idle",
+			.flags = CPUIDLE_FLAG_TIME_VALID,
+			.exit_latency = 250,	/* WAG */
+			.target_residency = 500,	/* WAG */
+			.enter = &apm_cpu_idle
+		},
+	},
+	.state_count = 2,
+};
+
+static struct cpuidle_device apm_cpuidle_device;
+
 /*
  * Local variables
  */
@@ -377,7 +400,6 @@ static struct {
 static int clock_slowed;
 static int idle_threshold __read_mostly = DEFAULT_IDLE_THRESHOLD;
 static int idle_period __read_mostly = DEFAULT_IDLE_PERIOD;
-static int set_pm_idle;
 static int suspends_pending;
 static int standbys_pending;
 static int ignore_sys_suspend;
@@ -884,8 +906,6 @@ static void apm_do_busy(void)
 #define IDLE_CALC_LIMIT	(HZ * 100)
 #define IDLE_LEAKY_MAX	16
 
-static void (*original_pm_idle)(void) __read_mostly;
-
 /**
  * apm_cpu_idle		-	cpu idling for APM capable Linux
  *
@@ -894,7 +914,8 @@ static void (*original_pm_idle)(void) __read_mostly;
  * Furthermore it calls the system default idle routine.
  */
 
-static void apm_cpu_idle(void)
+static int apm_cpu_idle(struct cpuidle_device *dev,
+			struct cpuidle_driver *drv, int index)
 {
 	static int use_apm_idle; /* = 0 */
 	static unsigned int last_jiffies; /* = 0 */
@@ -904,7 +925,6 @@ static void apm_cpu_idle(void)
 	unsigned int jiffies_since_last_check = jiffies - last_jiffies;
 	unsigned int bucket;
 
-	WARN_ONCE(1, "deprecated apm_cpu_idle will be deleted in 2012");
 recalc:
 	if (jiffies_since_last_check > IDLE_CALC_LIMIT) {
 		use_apm_idle = 0;
@@ -950,10 +970,7 @@ recalc:
 				break;
 			}
 		}
-		if (original_pm_idle)
-			original_pm_idle();
-		else
-			default_idle();
+		default_idle();
 		local_irq_disable();
 		jiffies_since_last_check = jiffies - last_jiffies;
 		if (jiffies_since_last_check > idle_period)
@@ -963,7 +980,7 @@ recalc:
 	if (apm_idle_done)
 		apm_do_busy();
 
-	local_irq_enable();
+	return index;
 }
 
 /**
@@ -2381,9 +2398,9 @@ static int __init apm_init(void)
 	if (HZ != 100)
 		idle_period = (idle_period * HZ) / 100;
 	if (idle_threshold < 100) {
-		original_pm_idle = pm_idle;
-		pm_idle = apm_cpu_idle;
-		set_pm_idle = 1;
+		if (!cpuidle_register_driver(&apm_idle_driver))
+			if (cpuidle_register_device(&apm_cpuidle_device))
+				cpuidle_unregister_driver(&apm_idle_driver);
 	}
 
 	return 0;
@@ -2393,15 +2410,9 @@ static void __exit apm_exit(void)
 {
 	int error;
 
-	if (set_pm_idle) {
-		pm_idle = original_pm_idle;
-		/*
-		 * We are about to unload the current idle thread pm callback
-		 * (pm_idle), Wait for all processors to update cached/local
-		 * copies of pm_idle before proceeding.
-		 */
-		kick_all_cpus_sync();
-	}
+	cpuidle_unregister_device(&apm_cpuidle_device);
+	cpuidle_unregister_driver(&apm_idle_driver);
+
 	if (((apm_info.bios.flags & APM_BIOS_DISENGAGED) == 0)
 	    && (apm_info.connection_version > 0x0100)) {
 		error = apm_engage_power_management(APM_DEVICE_ALL, 0);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 92dfec986a48..af6455e3fcc9 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -17,15 +17,6 @@
 #include <asm/paravirt.h>
 #include <asm/alternative.h>
 
-static int __init no_halt(char *s)
-{
-	WARN_ONCE(1, "\"no-hlt\" is deprecated, please use \"idle=poll\"\n");
-	boot_cpu_data.hlt_works_ok = 0;
-	return 1;
-}
-
-__setup("no-hlt", no_halt);
-
 static int __init no_387(char *s)
 {
 	boot_cpu_data.hard_math = 0;
@@ -89,23 +80,6 @@ static void __init check_fpu(void)
 	pr_warn("Hmm, FPU with FDIV bug\n");
 }
 
-static void __init check_hlt(void)
-{
-	if (boot_cpu_data.x86 >= 5 || paravirt_enabled())
-		return;
-
-	pr_info("Checking 'hlt' instruction... ");
-	if (!boot_cpu_data.hlt_works_ok) {
-		pr_cont("disabled\n");
-		return;
-	}
-	halt();
-	halt();
-	halt();
-	halt();
-	pr_cont("OK\n");
-}
-
 /*
  * Check whether we are able to run this kernel safely on SMP.
  *
@@ -129,7 +103,6 @@ void __init check_bugs(void)
 	print_cpu_info(&boot_cpu_data);
 #endif
 	check_config();
-	check_hlt();
 	init_utsname()->machine[1] =
 		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
 	alternative_instructions();
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 3286a92e662a..e280253f6f94 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -28,7 +28,6 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
 {
 	seq_printf(m,
 		   "fdiv_bug\t: %s\n"
-		   "hlt_bug\t\t: %s\n"
 		   "f00f_bug\t: %s\n"
 		   "coma_bug\t: %s\n"
 		   "fpu\t\t: %s\n"
@@ -36,7 +35,6 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
 		   "cpuid level\t: %d\n"
 		   "wp\t\t: %s\n",
 		   c->fdiv_bug ? "yes" : "no",
-		   c->hlt_works_ok ? "no" : "yes",
 		   c->f00f_bug ? "yes" : "no",
 		   c->coma_bug ? "yes" : "no",
 		   c->hard_math ? "yes" : "no",
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index dcfc1f410dc4..14ae10031ff0 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -268,13 +268,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
 EXPORT_SYMBOL(boot_option_idle_override);
 
-/*
- * Powermanagement idle function, if any..
- */
-void (*pm_idle)(void);
-#ifdef CONFIG_APM_MODULE
-EXPORT_SYMBOL(pm_idle);
-#endif
+static void (*x86_idle)(void);
 
 #ifndef CONFIG_SMP
 static inline void play_dead(void)
@@ -351,7 +345,7 @@ void cpu_idle(void)
 			rcu_idle_enter();
 
 			if (cpuidle_idle_call())
-				pm_idle();
+				x86_idle();
 
 			rcu_idle_exit();
 			start_critical_timings();
@@ -394,14 +388,16 @@ void default_idle(void)
 EXPORT_SYMBOL(default_idle);
 #endif
 
-bool set_pm_idle_to_default(void)
+#ifdef CONFIG_XEN
+bool xen_set_default_idle(void)
 {
-	bool ret = !!pm_idle;
+	bool ret = !!x86_idle;
 
-	pm_idle = default_idle;
+	x86_idle = default_idle;
 
 	return ret;
 }
+#endif
 void stop_this_cpu(void *dummy)
 {
 	local_irq_disable();
@@ -411,29 +407,8 @@ void stop_this_cpu(void *dummy)
 	set_cpu_online(smp_processor_id(), false);
 	disable_local_APIC();
 
-	for (;;) {
-		if (hlt_works(smp_processor_id()))
-			halt();
-	}
-}
-
-/* Default MONITOR/MWAIT with no hints, used for default C1 state */
-static void mwait_idle(void)
-{
-	if (!need_resched()) {
-		trace_cpu_idle_rcuidle(1, smp_processor_id());
-		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
-			clflush((void *)&current_thread_info()->flags);
-
-		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		smp_mb();
-		if (!need_resched())
-			__sti_mwait(0, 0);
-		else
-			local_irq_enable();
-		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
-	} else
-		local_irq_enable();
+	for (;;)
+		halt();
 }
 
 /*
@@ -450,53 +425,6 @@ static void poll_idle(void)
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
-/*
- * mwait selection logic:
- *
- * It depends on the CPU. For AMD CPUs that support MWAIT this is
- * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings
- * then depend on a clock divisor and current Pstate of the core. If
- * all cores of a processor are in halt state (C1) the processor can
- * enter the C1E (C1 enhanced) state. If mwait is used this will never
- * happen.
- *
- * idle=mwait overrides this decision and forces the usage of mwait.
- */
-
-#define MWAIT_INFO			0x05
-#define MWAIT_ECX_EXTENDED_INFO		0x01
-#define MWAIT_EDX_C1			0xf0
-
-int mwait_usable(const struct cpuinfo_x86 *c)
-{
-	u32 eax, ebx, ecx, edx;
-
-	/* Use mwait if idle=mwait boot option is given */
-	if (boot_option_idle_override == IDLE_FORCE_MWAIT)
-		return 1;
-
-	/*
-	 * Any idle= boot option other than idle=mwait means that we must not
-	 * use mwait. Eg: idle=halt or idle=poll or idle=nomwait
-	 */
-	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
-		return 0;
-
-	if (c->cpuid_level < MWAIT_INFO)
-		return 0;
-
-	cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
-	/* Check, whether EDX has extended info about MWAIT */
-	if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
-		return 1;
-
-	/*
-	 * edx enumeratios MONITOR/MWAIT extensions. Check, whether
-	 * C1 supports MWAIT
-	 */
-	return (edx & MWAIT_EDX_C1);
-}
-
 bool amd_e400_c1e_detected;
 EXPORT_SYMBOL(amd_e400_c1e_detected);
 
@@ -561,31 +489,24 @@ static void amd_e400_idle(void)
 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
-	if (pm_idle == poll_idle && smp_num_siblings > 1) {
+	if (x86_idle == poll_idle && smp_num_siblings > 1)
 		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
-	}
 #endif
-	if (pm_idle)
+	if (x86_idle)
 		return;
 
-	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
-		/*
-		 * One CPU supports mwait => All CPUs supports mwait
-		 */
-		pr_info("using mwait in idle threads\n");
-		pm_idle = mwait_idle;
-	} else if (cpu_has_amd_erratum(amd_erratum_400)) {
+	if (cpu_has_amd_erratum(amd_erratum_400)) {
 		/* E400: APIC timer interrupt does not wake up CPU from C1e */
 		pr_info("using AMD E400 aware idle routine\n");
-		pm_idle = amd_e400_idle;
+		x86_idle = amd_e400_idle;
 	} else
-		pm_idle = default_idle;
+		x86_idle = default_idle;
 }
 
 void __init init_amd_e400_c1e_mask(void)
 {
 	/* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
-	if (pm_idle == amd_e400_idle)
+	if (x86_idle == amd_e400_idle)
 		zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
 }
 
@@ -596,11 +517,8 @@ static int __init idle_setup(char *str)
 
 	if (!strcmp(str, "poll")) {
 		pr_info("using polling idle threads\n");
-		pm_idle = poll_idle;
+		x86_idle = poll_idle;
 		boot_option_idle_override = IDLE_POLL;
-	} else if (!strcmp(str, "mwait")) {
-		boot_option_idle_override = IDLE_FORCE_MWAIT;
-		WARN_ONCE(1, "\"idle=mwait\" will be removed in 2012\n");
 	} else if (!strcmp(str, "halt")) {
 		/*
 		 * When the boot option of idle=halt is added, halt is
@@ -609,7 +527,7 @@
 		 * To continue to load the CPU idle driver, don't touch
 		 * the boot_option_idle_override.
 		 */
-		pm_idle = default_idle;
+		x86_idle = default_idle;
 		boot_option_idle_override = IDLE_HALT;
 	} else if (!strcmp(str, "nomwait")) {
 		/*
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ed0fe385289d..a6ceaedc396a 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1369,7 +1369,7 @@ static inline void mwait_play_dead(void)
 	void *mwait_ptr;
 	struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);
 
-	if (!(this_cpu_has(X86_FEATURE_MWAIT) && mwait_usable(c)))
+	if (!this_cpu_has(X86_FEATURE_MWAIT))
 		return;
 	if (!this_cpu_has(X86_FEATURE_CLFLSH))
 		return;
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 8971a26d21ab..94eac5c85cdc 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -556,12 +556,9 @@ void __init xen_arch_setup(void)
 	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);
 
 	/* Set up idle, making sure it calls safe_halt() pvop */
-#ifdef CONFIG_X86_32
-	boot_cpu_data.hlt_works_ok = 1;
-#endif
 	disable_cpuidle();
 	disable_cpufreq();
-	WARN_ON(set_pm_idle_to_default());
+	WARN_ON(xen_set_default_idle());
 	fiddle_vdso();
 #ifdef CONFIG_NUMA
 	numa_off = 1;