Diffstat (limited to 'arch')
 arch/arm/boot/dts/highbank.dts              |  10
 arch/arm/kernel/process.c                   |  13
 arch/arm/kernel/smp_twd.c                   |  53
 arch/arm/mach-davinci/cpuidle.c             |  84
 arch/arm/mach-exynos/include/mach/cpufreq.h |  19
 arch/arm/mach-highbank/Kconfig              |   4
 arch/arm/mach-omap2/pm34xx.c                |   2
 arch/arm/mach-tegra/cpu-tegra.c             |   3
 arch/arm64/kernel/process.c                 |  13
 arch/blackfin/kernel/process.c              |   7
 arch/cris/kernel/process.c                  |  11
 arch/ia64/hp/common/aml_nfw.c               |   2
 arch/ia64/include/asm/acpi.h                |   4
 arch/ia64/kernel/process.c                  |   3
 arch/ia64/kernel/setup.c                    |   1
 arch/m32r/kernel/process.c                  |  51
 arch/microblaze/kernel/process.c            |   3
 arch/mn10300/kernel/process.c               |   7
 arch/openrisc/kernel/idle.c                 |   5
 arch/sh/kernel/idle.c                       |  12
 arch/sparc/include/asm/processor_32.h       |   1
 arch/sparc/kernel/apc.c                     |   3
 arch/sparc/kernel/leon_pmc.c                |   5
 arch/sparc/kernel/pmc.c                     |   3
 arch/sparc/kernel/process_32.c              |   7
 arch/unicore32/kernel/process.c             |   5
 arch/x86/Kconfig                            |  11
 arch/x86/include/asm/acpi.h                 |   4
 arch/x86/include/asm/mwait.h                |   3
 arch/x86/include/asm/processor.h            |  18
 arch/x86/include/uapi/asm/msr-index.h       |   3
 arch/x86/kernel/apm_32.c                    |  57
 arch/x86/kernel/cpu/bugs.c                  |  27
 arch/x86/kernel/cpu/proc.c                  |   2
 arch/x86/kernel/process.c                   | 122
 arch/x86/kernel/smpboot.c                   |   2
 arch/x86/platform/olpc/olpc-xo15-sci.c      |   2
 arch/x86/xen/setup.c                        |   5
 38 files changed, 180 insertions(+), 407 deletions(-)
diff --git a/arch/arm/boot/dts/highbank.dts b/arch/arm/boot/dts/highbank.dts
index 5927a8df5625..6aad34ad9517 100644
--- a/arch/arm/boot/dts/highbank.dts
+++ b/arch/arm/boot/dts/highbank.dts
@@ -37,6 +37,16 @@
 			next-level-cache = <&L2>;
 			clocks = <&a9pll>;
 			clock-names = "cpu";
+			operating-points = <
+				/* kHz ignored */
+				1300000 1000000
+				1200000 1000000
+				1100000 1000000
+				 800000 1000000
+				 400000 1000000
+				 200000 1000000
+			>;
+			clock-latency = <100000>;
 		};
 
 		cpu@901 {
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index c6dec5fc20aa..047d3e40e470 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -172,14 +172,9 @@ static void default_idle(void)
 	local_irq_enable();
 }
 
-void (*pm_idle)(void) = default_idle;
-EXPORT_SYMBOL(pm_idle);
-
 /*
- * The idle thread, has rather strange semantics for calling pm_idle,
- * but this is what x86 does and we need to do the same, so that
- * things like cpuidle get called in the same way. The only difference
- * is that we always respect 'hlt_counter' to prevent low power idle.
+ * The idle thread.
+ * We always respect 'hlt_counter' to prevent low power idle.
  */
 void cpu_idle(void)
 {
@@ -210,10 +205,10 @@ void cpu_idle(void)
 		} else if (!need_resched()) {
 			stop_critical_timings();
 			if (cpuidle_idle_call())
-				pm_idle();
+				default_idle();
 			start_critical_timings();
 			/*
-			 * pm_idle functions must always
+			 * default_idle functions must always
 			 * return with IRQs enabled.
 			 */
 			WARN_ON(irqs_disabled());
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 49f335d301ba..ae0c7bb39ae8 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -31,7 +31,6 @@ static void __iomem *twd_base;
 
 static struct clk *twd_clk;
 static unsigned long twd_timer_rate;
-static bool common_setup_called;
 static DEFINE_PER_CPU(bool, percpu_setup_called);
 
 static struct clock_event_device __percpu **twd_evt;
@@ -239,25 +238,28 @@ static irqreturn_t twd_handler(int irq, void *dev_id)
 	return IRQ_NONE;
 }
 
-static struct clk *twd_get_clock(void)
+static void twd_get_clock(struct device_node *np)
 {
-	struct clk *clk;
 	int err;
 
-	clk = clk_get_sys("smp_twd", NULL);
-	if (IS_ERR(clk)) {
-		pr_err("smp_twd: clock not found: %d\n", (int)PTR_ERR(clk));
-		return clk;
+	if (np)
+		twd_clk = of_clk_get(np, 0);
+	else
+		twd_clk = clk_get_sys("smp_twd", NULL);
+
+	if (IS_ERR(twd_clk)) {
+		pr_err("smp_twd: clock not found %d\n", (int) PTR_ERR(twd_clk));
+		return;
 	}
 
-	err = clk_prepare_enable(clk);
+	err = clk_prepare_enable(twd_clk);
 	if (err) {
 		pr_err("smp_twd: clock failed to prepare+enable: %d\n", err);
-		clk_put(clk);
-		return ERR_PTR(err);
+		clk_put(twd_clk);
+		return;
 	}
 
-	return clk;
+	twd_timer_rate = clk_get_rate(twd_clk);
 }
 
 /*
@@ -280,26 +282,7 @@ static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
 	}
 	per_cpu(percpu_setup_called, cpu) = true;
 
-	/*
-	 * This stuff only need to be done once for the entire TWD cluster
-	 * during the runtime of the system.
-	 */
-	if (!common_setup_called) {
-		twd_clk = twd_get_clock();
-
-		/*
-		 * We use IS_ERR_OR_NULL() here, because if the clock stubs
-		 * are active we will get a valid clk reference which is
-		 * however NULL and will return the rate 0. In that case we
-		 * need to calibrate the rate instead.
-		 */
-		if (!IS_ERR_OR_NULL(twd_clk))
-			twd_timer_rate = clk_get_rate(twd_clk);
-		else
-			twd_calibrate_rate();
-
-		common_setup_called = true;
-	}
+	twd_calibrate_rate();
 
 	/*
 	 * The following is done once per CPU the first time .setup() is
@@ -330,7 +313,7 @@ static struct local_timer_ops twd_lt_ops __cpuinitdata = {
 	.stop	= twd_timer_stop,
 };
 
-static int __init twd_local_timer_common_register(void)
+static int __init twd_local_timer_common_register(struct device_node *np)
 {
 	int err;
 
@@ -350,6 +333,8 @@ static int __init twd_local_timer_common_register(void)
 	if (err)
 		goto out_irq;
 
+	twd_get_clock(np);
+
 	return 0;
 
 out_irq:
@@ -373,7 +358,7 @@ int __init twd_local_timer_register(struct twd_local_timer *tlt)
 	if (!twd_base)
 		return -ENOMEM;
 
-	return twd_local_timer_common_register();
+	return twd_local_timer_common_register(NULL);
 }
 
 #ifdef CONFIG_OF
@@ -405,7 +390,7 @@ void __init twd_local_timer_of_register(void)
 		goto out;
 	}
 
-	err = twd_local_timer_common_register();
+	err = twd_local_timer_common_register(np);
 
 out:
 	WARN(err, "twd_local_timer_of_register failed (%d)\n", err);
diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c
index 9107691adbdb..5ac9e9384b15 100644
--- a/arch/arm/mach-davinci/cpuidle.c
+++ b/arch/arm/mach-davinci/cpuidle.c
@@ -25,35 +25,44 @@
 
 #define DAVINCI_CPUIDLE_MAX_STATES	2
 
-struct davinci_ops {
-	void (*enter) (u32 flags);
-	void (*exit) (u32 flags);
-	u32 flags;
-};
+static DEFINE_PER_CPU(struct cpuidle_device, davinci_cpuidle_device);
+static void __iomem *ddr2_reg_base;
+static bool ddr2_pdown;
+
+static void davinci_save_ddr_power(int enter, bool pdown)
+{
+	u32 val;
+
+	val = __raw_readl(ddr2_reg_base + DDR2_SDRCR_OFFSET);
+
+	if (enter) {
+		if (pdown)
+			val |= DDR2_SRPD_BIT;
+		else
+			val &= ~DDR2_SRPD_BIT;
+		val |= DDR2_LPMODEN_BIT;
+	} else {
+		val &= ~(DDR2_SRPD_BIT | DDR2_LPMODEN_BIT);
+	}
+
+	__raw_writel(val, ddr2_reg_base + DDR2_SDRCR_OFFSET);
+}
 
 /* Actual code that puts the SoC in different idle states */
 static int davinci_enter_idle(struct cpuidle_device *dev,
 				struct cpuidle_driver *drv,
 				int index)
 {
-	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
-	struct davinci_ops *ops = cpuidle_get_statedata(state_usage);
-
-	if (ops && ops->enter)
-		ops->enter(ops->flags);
+	davinci_save_ddr_power(1, ddr2_pdown);
 
 	index = cpuidle_wrap_enter(dev, drv, index,
 				arm_cpuidle_simple_enter);
 
-	if (ops && ops->exit)
-		ops->exit(ops->flags);
+	davinci_save_ddr_power(0, ddr2_pdown);
 
 	return index;
 }
 
-/* fields in davinci_ops.flags */
-#define DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN BIT(0)
-
 static struct cpuidle_driver davinci_idle_driver = {
 	.name = "cpuidle-davinci",
 	.owner = THIS_MODULE,
@@ -70,45 +79,6 @@ static struct cpuidle_driver davinci_idle_driver = {
 	.state_count = DAVINCI_CPUIDLE_MAX_STATES,
 };
 
-static DEFINE_PER_CPU(struct cpuidle_device, davinci_cpuidle_device);
-static void __iomem *ddr2_reg_base;
-
-static void davinci_save_ddr_power(int enter, bool pdown)
-{
-	u32 val;
-
-	val = __raw_readl(ddr2_reg_base + DDR2_SDRCR_OFFSET);
-
-	if (enter) {
-		if (pdown)
-			val |= DDR2_SRPD_BIT;
-		else
-			val &= ~DDR2_SRPD_BIT;
-		val |= DDR2_LPMODEN_BIT;
-	} else {
-		val &= ~(DDR2_SRPD_BIT | DDR2_LPMODEN_BIT);
-	}
-
-	__raw_writel(val, ddr2_reg_base + DDR2_SDRCR_OFFSET);
-}
-
-static void davinci_c2state_enter(u32 flags)
-{
-	davinci_save_ddr_power(1, !!(flags & DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN));
-}
-
-static void davinci_c2state_exit(u32 flags)
-{
-	davinci_save_ddr_power(0, !!(flags & DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN));
-}
-
-static struct davinci_ops davinci_states[DAVINCI_CPUIDLE_MAX_STATES] = {
-	[1] = {
-		.enter	= davinci_c2state_enter,
-		.exit	= davinci_c2state_exit,
-	},
-};
-
 static int __init davinci_cpuidle_probe(struct platform_device *pdev)
 {
 	int ret;
@@ -124,11 +94,7 @@ static int __init davinci_cpuidle_probe(struct platform_device *pdev)
 
 	ddr2_reg_base = pdata->ddr2_ctlr_base;
 
-	if (pdata->ddr2_pdown)
-		davinci_states[1].flags |= DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN;
-	cpuidle_set_statedata(&device->states_usage[1], &davinci_states[1]);
-
-	device->state_count = DAVINCI_CPUIDLE_MAX_STATES;
+	ddr2_pdown = pdata->ddr2_pdown;
 
 	ret = cpuidle_register_driver(&davinci_idle_driver);
 	if (ret) {
diff --git a/arch/arm/mach-exynos/include/mach/cpufreq.h b/arch/arm/mach-exynos/include/mach/cpufreq.h
index 7517c3f417af..b5d39dd03b2a 100644
--- a/arch/arm/mach-exynos/include/mach/cpufreq.h
+++ b/arch/arm/mach-exynos/include/mach/cpufreq.h
@@ -18,12 +18,25 @@ enum cpufreq_level_index {
 	L20,
 };
 
+#define APLL_FREQ(f, a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, m, p, s) \
+	{ \
+		.freq = (f) * 1000, \
+		.clk_div_cpu0 = ((a0) | (a1) << 4 | (a2) << 8 | (a3) << 12 | \
+			(a4) << 16 | (a5) << 20 | (a6) << 24 | (a7) << 28), \
+		.clk_div_cpu1 = (b0 << 0 | b1 << 4 | b2 << 8), \
+		.mps = ((m) << 16 | (p) << 8 | (s)), \
+	}
+
+struct apll_freq {
+	unsigned int freq;
+	u32 clk_div_cpu0;
+	u32 clk_div_cpu1;
+	u32 mps;
+};
+
 struct exynos_dvfs_info {
 	unsigned long mpll_freq_khz;
 	unsigned int pll_safe_idx;
-	unsigned int pm_lock_idx;
-	unsigned int max_support_idx;
-	unsigned int min_support_idx;
 	struct clk *cpu_clk;
 	unsigned int *volt_table;
 	struct cpufreq_frequency_table *freq_table;
diff --git a/arch/arm/mach-highbank/Kconfig b/arch/arm/mach-highbank/Kconfig
index 551c97e87a78..44b12f9c1584 100644
--- a/arch/arm/mach-highbank/Kconfig
+++ b/arch/arm/mach-highbank/Kconfig
@@ -1,5 +1,7 @@
 config ARCH_HIGHBANK
 	bool "Calxeda ECX-1000/2000 (Highbank/Midway)" if ARCH_MULTI_V7
+	select ARCH_HAS_CPUFREQ
+	select ARCH_HAS_OPP
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select ARM_AMBA
 	select ARM_GIC
@@ -11,5 +13,7 @@ config ARCH_HIGHBANK
 	select GENERIC_CLOCKEVENTS
 	select HAVE_ARM_SCU
 	select HAVE_SMP
+	select MAILBOX
+	select PL320_MBOX
 	select SPARSE_IRQ
 	select USE_OF
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 7be3622cfc85..2d93d8b23835 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -351,12 +351,10 @@ static void omap3_pm_idle(void)
 	if (omap_irq_pending())
 		goto out;
 
-	trace_power_start(POWER_CSTATE, 1, smp_processor_id());
 	trace_cpu_idle(1, smp_processor_id());
 
 	omap_sram_idle();
 
-	trace_power_end(smp_processor_id());
 	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 
 out:
diff --git a/arch/arm/mach-tegra/cpu-tegra.c b/arch/arm/mach-tegra/cpu-tegra.c
index a74d3c7d2e26..a36a03d3c9a0 100644
--- a/arch/arm/mach-tegra/cpu-tegra.c
+++ b/arch/arm/mach-tegra/cpu-tegra.c
@@ -243,8 +243,7 @@ static int tegra_cpu_init(struct cpufreq_policy *policy)
 	/* FIXME: what's the actual transition time? */
 	policy->cpuinfo.transition_latency = 300 * 1000;
 
-	policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
-	cpumask_copy(policy->related_cpus, cpu_possible_mask);
+	cpumask_copy(policy->cpus, cpu_possible_mask);
 
 	if (policy->cpu == 0)
 		register_pm_notifier(&tegra_cpu_pm_notifier);
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index cb0956bc96ed..c7002d40a9b0 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -97,14 +97,9 @@ static void default_idle(void)
 	local_irq_enable();
 }
 
-void (*pm_idle)(void) = default_idle;
-EXPORT_SYMBOL_GPL(pm_idle);
-
 /*
- * The idle thread, has rather strange semantics for calling pm_idle,
- * but this is what x86 does and we need to do the same, so that
- * things like cpuidle get called in the same way. The only difference
- * is that we always respect 'hlt_counter' to prevent low power idle.
+ * The idle thread.
+ * We always respect 'hlt_counter' to prevent low power idle.
  */
 void cpu_idle(void)
 {
@@ -122,10 +117,10 @@ void cpu_idle(void)
 		local_irq_disable();
 		if (!need_resched()) {
 			stop_critical_timings();
-			pm_idle();
+			default_idle();
 			start_critical_timings();
 			/*
-			 * pm_idle functions should always return
+			 * default_idle functions should always return
 			 * with IRQs enabled.
 			 */
 			WARN_ON(irqs_disabled());
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 3e16ad9b0a99..8061426b7df5 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -39,12 +39,6 @@ int nr_l1stack_tasks;
 void *l1_stack_base;
 unsigned long l1_stack_len;
 
-/*
- * Powermanagement idle function, if any..
- */
-void (*pm_idle)(void) = NULL;
-EXPORT_SYMBOL(pm_idle);
-
 void (*pm_power_off)(void) = NULL;
 EXPORT_SYMBOL(pm_power_off);
 
@@ -81,7 +75,6 @@ void cpu_idle(void)
 {
 	/* endless idle loop with no priority at all */
 	while (1) {
-		void (*idle)(void) = pm_idle;
 
 #ifdef CONFIG_HOTPLUG_CPU
 		if (cpu_is_offline(smp_processor_id()))
diff --git a/arch/cris/kernel/process.c b/arch/cris/kernel/process.c
index 7f65be6f7f17..104ff4dd9b98 100644
--- a/arch/cris/kernel/process.c
+++ b/arch/cris/kernel/process.c
@@ -54,11 +54,6 @@ void enable_hlt(void)
 
 EXPORT_SYMBOL(enable_hlt);
 
-/*
- * The following aren't currently used.
- */
-void (*pm_idle)(void);
-
 extern void default_idle(void);
 
 void (*pm_power_off)(void);
@@ -77,16 +72,12 @@ void cpu_idle (void)
 	while (1) {
 		rcu_idle_enter();
 		while (!need_resched()) {
-			void (*idle)(void);
 			/*
 			 * Mark this as an RCU critical section so that
 			 * synchronize_kernel() in the unload path waits
 			 * for our completion.
 			 */
-			idle = pm_idle;
-			if (!idle)
-				idle = default_idle;
-			idle();
+			default_idle();
 		}
 		rcu_idle_exit();
 		schedule_preempt_disabled();
diff --git a/arch/ia64/hp/common/aml_nfw.c b/arch/ia64/hp/common/aml_nfw.c
index 6192f7188654..916ffe770bcf 100644
--- a/arch/ia64/hp/common/aml_nfw.c
+++ b/arch/ia64/hp/common/aml_nfw.c
@@ -191,7 +191,7 @@ static int aml_nfw_add(struct acpi_device *device)
 	return aml_nfw_add_global_handler();
 }
 
-static int aml_nfw_remove(struct acpi_device *device, int type)
+static int aml_nfw_remove(struct acpi_device *device)
 {
 	return aml_nfw_remove_global_handler();
 }
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index 359e68a03ca3..faa1bf0da815 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -52,10 +52,6 @@
 
 /* Asm macros */
 
-#define ACPI_ASM_MACROS
-#define BREAKPOINT3
-#define ACPI_DISABLE_IRQS() local_irq_disable()
-#define ACPI_ENABLE_IRQS()  local_irq_enable()
 #define ACPI_FLUSH_CPU_CACHE()
 
 static inline int
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 31360cbbd5f8..e34f565f595a 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -57,8 +57,6 @@ void (*ia64_mark_idle)(int);
 
 unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
 EXPORT_SYMBOL(boot_option_idle_override);
-void (*pm_idle) (void);
-EXPORT_SYMBOL(pm_idle);
 void (*pm_power_off) (void);
 EXPORT_SYMBOL(pm_power_off);
 
@@ -301,7 +299,6 @@ cpu_idle (void)
 		if (mark_idle)
 			(*mark_idle)(1);
 
-		idle = pm_idle;
 		if (!idle)
 			idle = default_idle;
 		(*idle)();
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index aaefd9b94f2f..2029cc0d2fc6 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -1051,7 +1051,6 @@ cpu_init (void)
 		max_num_phys_stacked = num_phys_stacked;
 	}
 	platform_cpu_init();
-	pm_idle = default_idle;
 }
 
 void __init
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c
index 765d0f57c787..bde899e155d3 100644
--- a/arch/m32r/kernel/process.c
+++ b/arch/m32r/kernel/process.c
@@ -44,36 +44,10 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
 	return tsk->thread.lr;
 }
 
-/*
- * Powermanagement idle function, if any..
- */
-static void (*pm_idle)(void) = NULL;
-
 void (*pm_power_off)(void) = NULL;
 EXPORT_SYMBOL(pm_power_off);
 
 /*
- * We use this is we don't have any better
- * idle routine..
- */
-static void default_idle(void)
-{
-	/* M32R_FIXME: Please use "cpu_sleep" mode. */
-	cpu_relax();
-}
-
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->work.need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle (void)
-{
-	/* M32R_FIXME */
-	cpu_relax();
-}
-
-/*
  * The idle thread. There's no useful work to be
  * done, so just try to conserve power and have a
  * low exit latency (ie sit in a loop waiting for
@@ -84,14 +58,8 @@ void cpu_idle (void)
 	/* endless idle loop with no priority at all */
 	while (1) {
 		rcu_idle_enter();
-		while (!need_resched()) {
-			void (*idle)(void) = pm_idle;
-
-			if (!idle)
-				idle = default_idle;
-
-			idle();
-		}
+		while (!need_resched())
+			cpu_relax();
 		rcu_idle_exit();
 		schedule_preempt_disabled();
 	}
@@ -120,21 +88,6 @@ void machine_power_off(void)
 	/* M32R_FIXME */
 }
 
-static int __init idle_setup (char *str)
-{
-	if (!strncmp(str, "poll", 4)) {
-		printk("using poll in idle threads.\n");
-		pm_idle = poll_idle;
-	} else if (!strncmp(str, "sleep", 4)) {
-		printk("using sleep in idle threads.\n");
-		pm_idle = default_idle;
-	}
-
-	return 1;
-}
-
-__setup("idle=", idle_setup);
-
 void show_regs(struct pt_regs * regs)
 {
 	printk("\n");
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index a5b74f729e5b..6ff2dcff3410 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -41,7 +41,6 @@ void show_regs(struct pt_regs *regs)
 		regs->msr, regs->ear, regs->esr, regs->fsr);
 }
 
-void (*pm_idle)(void);
 void (*pm_power_off)(void) = NULL;
 EXPORT_SYMBOL(pm_power_off);
 
@@ -98,8 +97,6 @@ void cpu_idle(void)
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		void (*idle)(void) = pm_idle;
-
 		if (!idle)
 			idle = default_idle;
 
diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c
index eb09f5a552ff..84f4e97e3074 100644
--- a/arch/mn10300/kernel/process.c
+++ b/arch/mn10300/kernel/process.c
@@ -37,12 +37,6 @@
 #include "internal.h"
 
 /*
- * power management idle function, if any..
- */
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
-
-/*
  * return saved PC of a blocked thread.
  */
 unsigned long thread_saved_pc(struct task_struct *tsk)
@@ -113,7 +107,6 @@ void cpu_idle(void)
 			void (*idle)(void);
 
 			smp_rmb();
-			idle = pm_idle;
 			if (!idle) {
 #if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
 				idle = poll_idle;
diff --git a/arch/openrisc/kernel/idle.c b/arch/openrisc/kernel/idle.c
index 7d618feb1b72..5e8a3b6d6bc6 100644
--- a/arch/openrisc/kernel/idle.c
+++ b/arch/openrisc/kernel/idle.c
@@ -39,11 +39,6 @@
 
 void (*powersave) (void) = NULL;
 
-static inline void pm_idle(void)
-{
-	barrier();
-}
-
 void cpu_idle(void)
 {
 	set_thread_flag(TIF_POLLING_NRFLAG);
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 0c910163caa3..3d5a1b387cc0 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -22,7 +22,7 @@
 #include <asm/smp.h>
 #include <asm/bl_bit.h>
 
-void (*pm_idle)(void);
+static void (*sh_idle)(void);
 
 static int hlt_counter;
 
@@ -103,9 +103,9 @@ void cpu_idle(void)
 			/* Don't trace irqs off for idle */
 			stop_critical_timings();
 			if (cpuidle_idle_call())
-				pm_idle();
+				sh_idle();
 			/*
-			 * Sanity check to ensure that pm_idle() returns
+			 * Sanity check to ensure that sh_idle() returns
 			 * with IRQs enabled
 			 */
 			WARN_ON(irqs_disabled());
@@ -123,13 +123,13 @@ void __init select_idle_routine(void)
 	/*
 	 * If a platform has set its own idle routine, leave it alone.
 	 */
-	if (pm_idle)
+	if (sh_idle)
 		return;
 
 	if (hlt_works())
-		pm_idle = default_idle;
+		sh_idle = default_idle;
 	else
-		pm_idle = poll_idle;
+		sh_idle = poll_idle;
 }
 
 void stop_this_cpu(void *unused)
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h
index c1e01914fd98..2c7baa4c4505 100644
--- a/arch/sparc/include/asm/processor_32.h
+++ b/arch/sparc/include/asm/processor_32.h
@@ -118,6 +118,7 @@ extern unsigned long get_wchan(struct task_struct *);
 extern struct task_struct *last_task_used_math;
 
 #define cpu_relax()	barrier()
+extern void (*sparc_idle)(void);
 
 #endif
 
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c
index 348fa1aeabce..eefda32b595e 100644
--- a/arch/sparc/kernel/apc.c
+++ b/arch/sparc/kernel/apc.c
@@ -20,6 +20,7 @@
 #include <asm/uaccess.h>
 #include <asm/auxio.h>
 #include <asm/apc.h>
+#include <asm/processor.h>
 
 /* Debugging
  *
@@ -158,7 +159,7 @@ static int apc_probe(struct platform_device *op)
 
 	/* Assign power management IDLE handler */
 	if (!apc_no_idle)
-		pm_idle = apc_swift_idle;
+		sparc_idle = apc_swift_idle;
 
 	printk(KERN_INFO "%s: power management initialized%s\n",
 	       APC_DEVNAME, apc_no_idle ? " (CPU idle disabled)" : "");
diff --git a/arch/sparc/kernel/leon_pmc.c b/arch/sparc/kernel/leon_pmc.c
index 4e174321097d..708bca435219 100644
--- a/arch/sparc/kernel/leon_pmc.c
+++ b/arch/sparc/kernel/leon_pmc.c
@@ -9,6 +9,7 @@
 #include <asm/leon_amba.h>
 #include <asm/cpu_type.h>
 #include <asm/leon.h>
+#include <asm/processor.h>
 
 /* List of Systems that need fixup instructions around power-down instruction */
 unsigned int pmc_leon_fixup_ids[] = {
@@ -69,9 +70,9 @@ static int __init leon_pmc_install(void)
 	if (sparc_cpu_model == sparc_leon) {
 		/* Assign power management IDLE handler */
 		if (pmc_leon_need_fixup())
-			pm_idle = pmc_leon_idle_fixup;
+			sparc_idle = pmc_leon_idle_fixup;
 		else
-			pm_idle = pmc_leon_idle;
+			sparc_idle = pmc_leon_idle;
 
 		printk(KERN_INFO "leon: power management initialized\n");
 	}
diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c
index dcbb62f63068..8b7297faca79 100644
--- a/arch/sparc/kernel/pmc.c
+++ b/arch/sparc/kernel/pmc.c
@@ -17,6 +17,7 @@
 #include <asm/oplib.h>
 #include <asm/uaccess.h>
 #include <asm/auxio.h>
+#include <asm/processor.h>
 
 /* Debug
  *
@@ -63,7 +64,7 @@ static int pmc_probe(struct platform_device *op)
 
 #ifndef PMC_NO_IDLE
 	/* Assign power management IDLE handler */
-	pm_idle = pmc_swift_idle;
+	sparc_idle = pmc_swift_idle;
 #endif
 
 	printk(KERN_INFO "%s: power management initialized\n", PMC_DEVNAME);
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index be8e862badaf..62eede13831a 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -43,8 +43,7 @@
  * Power management idle function
  * Set in pm platform drivers (apc.c and pmc.c)
  */
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
+void (*sparc_idle)(void);
 
 /*
  * Power-off handler instantiation for pm.h compliance
@@ -75,8 +74,8 @@ void cpu_idle(void)
 	/* endless idle loop with no priority at all */
 	for (;;) {
 		while (!need_resched()) {
-			if (pm_idle)
-				(*pm_idle)();
+			if (sparc_idle)
+				(*sparc_idle)();
 			else
 				cpu_relax();
 		}
diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c
index 62bad9fed03e..872d7e22d847 100644
--- a/arch/unicore32/kernel/process.c
+++ b/arch/unicore32/kernel/process.c
@@ -45,11 +45,6 @@ static const char * const processor_modes[] = {
45 "UK18", "UK19", "UK1A", "EXTN", "UK1C", "UK1D", "UK1E", "SUSR" 45 "UK18", "UK19", "UK1A", "EXTN", "UK1C", "UK1D", "UK1E", "SUSR"
46}; 46};
47 47
48/*
49 * The idle thread, has rather strange semantics for calling pm_idle,
50 * but this is what x86 does and we need to do the same, so that
51 * things like cpuidle get called in the same way.
52 */
53void cpu_idle(void) 48void cpu_idle(void)
54{ 49{
55 /* endless idle loop with no priority at all */ 50 /* endless idle loop with no priority at all */
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 260857a53b87..f7a27fdb5098 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -469,6 +469,16 @@ config X86_MDFLD
 
 endif
 
+config X86_INTEL_LPSS
+	bool "Intel Low Power Subsystem Support"
+	depends on ACPI
+	select COMMON_CLK
+	---help---
+	  Select to build support for Intel Low Power Subsystem such as
+	  found on Intel Lynxpoint PCH. Selecting this option enables
+	  things like clock tree (common clock framework) which are needed
+	  by the LPSS peripheral drivers.
+
 config X86_RDC321X
 	bool "RDC R-321x SoC"
 	depends on X86_32
@@ -1927,6 +1937,7 @@ config APM_DO_ENABLE
 	  this feature.
 
 config APM_CPU_IDLE
+	depends on CPU_IDLE
 	bool "Make CPU Idle calls when idle"
 	---help---
 	  Enable calls to APM CPU Idle/CPU Busy inside the kernel's idle loop.
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 0c44630d1789..b31bf97775fc 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -49,10 +49,6 @@
 
 /* Asm macros */
 
-#define ACPI_ASM_MACROS
-#define BREAKPOINT3
-#define ACPI_DISABLE_IRQS() local_irq_disable()
-#define ACPI_ENABLE_IRQS()  local_irq_enable()
 #define ACPI_FLUSH_CPU_CACHE()	wbinvd()
 
 int __acpi_acquire_global_lock(unsigned int *lock);
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index bcdff997668c..2f366d0ac6b4 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -4,7 +4,8 @@
 #define MWAIT_SUBSTATE_MASK		0xf
 #define MWAIT_CSTATE_MASK		0xf
 #define MWAIT_SUBSTATE_SIZE		4
-#define MWAIT_MAX_NUM_CSTATES		8
+#define MWAIT_HINT2CSTATE(hint)		(((hint) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK)
+#define MWAIT_HINT2SUBSTATE(hint)	((hint) & MWAIT_CSTATE_MASK)
 
 #define CPUID_MWAIT_LEAF		5
 #define CPUID5_ECX_EXTENSIONS_SUPPORTED	0x1
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index cf500543f6ff..d172588efae5 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -89,7 +89,6 @@ struct cpuinfo_x86 {
 	char			wp_works_ok;	/* It doesn't on 386's */
 
 	/* Problems on some 486Dx4's and old 386's: */
-	char			hlt_works_ok;
 	char			hard_math;
 	char			rfu;
 	char			fdiv_bug;
@@ -165,15 +164,6 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 
 extern const struct seq_operations cpuinfo_op;
 
-static inline int hlt_works(int cpu)
-{
-#ifdef CONFIG_X86_32
-	return cpu_data(cpu).hlt_works_ok;
-#else
-	return 1;
-#endif
-}
-
 #define cache_line_size()	(boot_cpu_data.x86_cache_alignment)
 
 extern void cpu_detect(struct cpuinfo_x86 *c);
@@ -725,7 +715,7 @@ extern unsigned long boot_option_idle_override;
 extern bool amd_e400_c1e_detected;
 
 enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
-			 IDLE_POLL, IDLE_FORCE_MWAIT};
+			 IDLE_POLL};
 
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);
@@ -998,7 +988,11 @@ extern unsigned long arch_align_stack(unsigned long sp);
 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
 
 void default_idle(void);
-bool set_pm_idle_to_default(void);
+#ifdef CONFIG_XEN
+bool xen_set_default_idle(void);
+#else
+#define xen_set_default_idle 0
+#endif
 
 void stop_this_cpu(void *dummy);
 
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index 075a40255591..f26d2771846f 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -103,6 +103,8 @@
 #define DEBUGCTLMSR_BTS_OFF_USR		(1UL << 10)
 #define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI	(1UL << 11)
 
+#define MSR_IA32_POWER_CTL		0x000001fc
+
 #define MSR_IA32_MC0_CTL		0x00000400
 #define MSR_IA32_MC0_STATUS		0x00000401
 #define MSR_IA32_MC0_ADDR		0x00000402
@@ -274,6 +276,7 @@
 #define MSR_IA32_PLATFORM_ID		0x00000017
 #define MSR_IA32_EBL_CR_POWERON		0x0000002a
 #define MSR_EBC_FREQUENCY_ID		0x0000002c
+#define MSR_SMI_COUNT			0x00000034
 #define MSR_IA32_FEATURE_CONTROL	0x0000003a
 #define MSR_IA32_TSC_ADJUST		0x0000003b
 
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 8d7012b7f402..66b5faffe14a 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -232,6 +232,7 @@
 #include <linux/acpi.h>
 #include <linux/syscore_ops.h>
 #include <linux/i8253.h>
+#include <linux/cpuidle.h>
 
 #include <asm/uaccess.h>
 #include <asm/desc.h>
@@ -360,13 +361,35 @@ struct apm_user {
  * idle percentage above which bios idle calls are done
  */
 #ifdef CONFIG_APM_CPU_IDLE
-#warning deprecated CONFIG_APM_CPU_IDLE will be deleted in 2012
 #define DEFAULT_IDLE_THRESHOLD	95
 #else
 #define DEFAULT_IDLE_THRESHOLD	100
 #endif
 #define DEFAULT_IDLE_PERIOD	(100 / 3)
 
+static int apm_cpu_idle(struct cpuidle_device *dev,
+			struct cpuidle_driver *drv, int index);
+
+static struct cpuidle_driver apm_idle_driver = {
+	.name = "apm_idle",
+	.owner = THIS_MODULE,
+	.en_core_tk_irqen = 1,
+	.states = {
+		{ /* entry 0 is for polling */ },
+		{ /* entry 1 is for APM idle */
+			.name = "APM",
+			.desc = "APM idle",
+			.flags = CPUIDLE_FLAG_TIME_VALID,
+			.exit_latency = 250,	/* WAG */
+			.target_residency = 500,	/* WAG */
+			.enter = &apm_cpu_idle
+		},
+	},
+	.state_count = 2,
+};
+
+static struct cpuidle_device apm_cpuidle_device;
+
 /*
  * Local variables
  */
@@ -377,7 +400,6 @@ static struct {
 static int clock_slowed;
 static int idle_threshold __read_mostly = DEFAULT_IDLE_THRESHOLD;
 static int idle_period __read_mostly = DEFAULT_IDLE_PERIOD;
-static int set_pm_idle;
 static int suspends_pending;
 static int standbys_pending;
 static int ignore_sys_suspend;
@@ -884,8 +906,6 @@ static void apm_do_busy(void)
 #define IDLE_CALC_LIMIT	(HZ * 100)
 #define IDLE_LEAKY_MAX	16
 
-static void (*original_pm_idle)(void) __read_mostly;
-
 /**
  * apm_cpu_idle		-	cpu idling for APM capable Linux
  *
@@ -894,7 +914,8 @@ static void (*original_pm_idle)(void) __read_mostly;
  * Furthermore it calls the system default idle routine.
  */
 
-static void apm_cpu_idle(void)
+static int apm_cpu_idle(struct cpuidle_device *dev,
+			struct cpuidle_driver *drv, int index)
 {
 	static int use_apm_idle; /* = 0 */
 	static unsigned int last_jiffies; /* = 0 */
@@ -905,7 +926,6 @@ static void apm_cpu_idle(void)
 	unsigned int jiffies_since_last_check = jiffies - last_jiffies;
 	unsigned int bucket;
 
-	WARN_ONCE(1, "deprecated apm_cpu_idle will be deleted in 2012");
 recalc:
 	task_cputime(current, NULL, &stime);
 	if (jiffies_since_last_check > IDLE_CALC_LIMIT) {
@@ -951,10 +971,7 @@ recalc:
 			break;
 		}
 	}
-	if (original_pm_idle)
-		original_pm_idle();
-	else
-		default_idle();
+	default_idle();
 	local_irq_disable();
 	jiffies_since_last_check = jiffies - last_jiffies;
 	if (jiffies_since_last_check > idle_period)
@@ -964,7 +981,7 @@ recalc:
 	if (apm_idle_done)
 		apm_do_busy();
 
-	local_irq_enable();
+	return index;
 }
 
 /**
@@ -2382,9 +2399,9 @@ static int __init apm_init(void)
 	if (HZ != 100)
 		idle_period = (idle_period * HZ) / 100;
 	if (idle_threshold < 100) {
-		original_pm_idle = pm_idle;
-		pm_idle = apm_cpu_idle;
-		set_pm_idle = 1;
+		if (!cpuidle_register_driver(&apm_idle_driver))
+			if (cpuidle_register_device(&apm_cpuidle_device))
+				cpuidle_unregister_driver(&apm_idle_driver);
 	}
 
 	return 0;
@@ -2394,15 +2411,9 @@ static void __exit apm_exit(void)
 {
 	int error;
 
-	if (set_pm_idle) {
-		pm_idle = original_pm_idle;
-		/*
-		 * We are about to unload the current idle thread pm callback
-		 * (pm_idle), Wait for all processors to update cached/local
-		 * copies of pm_idle before proceeding.
-		 */
-		kick_all_cpus_sync();
-	}
+	cpuidle_unregister_device(&apm_cpuidle_device);
+	cpuidle_unregister_driver(&apm_idle_driver);
+
 	if (((apm_info.bios.flags & APM_BIOS_DISENGAGED) == 0)
 	    && (apm_info.connection_version > 0x0100)) {
 		error = apm_engage_power_management(APM_DEVICE_ALL, 0);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 92dfec986a48..af6455e3fcc9 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -17,15 +17,6 @@
 #include <asm/paravirt.h>
 #include <asm/alternative.h>
 
-static int __init no_halt(char *s)
-{
-	WARN_ONCE(1, "\"no-hlt\" is deprecated, please use \"idle=poll\"\n");
-	boot_cpu_data.hlt_works_ok = 0;
-	return 1;
-}
-
-__setup("no-hlt", no_halt);
-
 static int __init no_387(char *s)
 {
 	boot_cpu_data.hard_math = 0;
@@ -89,23 +80,6 @@ static void __init check_fpu(void)
 	pr_warn("Hmm, FPU with FDIV bug\n");
 }
 
-static void __init check_hlt(void)
-{
-	if (boot_cpu_data.x86 >= 5 || paravirt_enabled())
-		return;
-
-	pr_info("Checking 'hlt' instruction... ");
-	if (!boot_cpu_data.hlt_works_ok) {
-		pr_cont("disabled\n");
-		return;
-	}
-	halt();
-	halt();
-	halt();
-	halt();
-	pr_cont("OK\n");
-}
-
 /*
  * Check whether we are able to run this kernel safely on SMP.
  *
@@ -129,7 +103,6 @@ void __init check_bugs(void)
 	print_cpu_info(&boot_cpu_data);
 #endif
 	check_config();
-	check_hlt();
 	init_utsname()->machine[1] =
 		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
 	alternative_instructions();
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 3286a92e662a..e280253f6f94 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -28,7 +28,6 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
 {
 	seq_printf(m,
 		   "fdiv_bug\t: %s\n"
-		   "hlt_bug\t\t: %s\n"
 		   "f00f_bug\t: %s\n"
 		   "coma_bug\t: %s\n"
 		   "fpu\t\t: %s\n"
@@ -36,7 +35,6 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
36 "cpuid level\t: %d\n" 35 "cpuid level\t: %d\n"
37 "wp\t\t: %s\n", 36 "wp\t\t: %s\n",
38 c->fdiv_bug ? "yes" : "no", 37 c->fdiv_bug ? "yes" : "no",
39 c->hlt_works_ok ? "no" : "yes",
40 c->f00f_bug ? "yes" : "no", 38 c->f00f_bug ? "yes" : "no",
41 c->coma_bug ? "yes" : "no", 39 c->coma_bug ? "yes" : "no",
42 c->hard_math ? "yes" : "no", 40 c->hard_math ? "yes" : "no",
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 2ed787f15bf0..14ae10031ff0 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -268,13 +268,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
 EXPORT_SYMBOL(boot_option_idle_override);
 
-/*
- * Powermanagement idle function, if any..
- */
-void (*pm_idle)(void);
-#ifdef CONFIG_APM_MODULE
-EXPORT_SYMBOL(pm_idle);
-#endif
+static void (*x86_idle)(void);
 
 #ifndef CONFIG_SMP
 static inline void play_dead(void)
@@ -351,7 +345,7 @@ void cpu_idle(void)
 			rcu_idle_enter();
 
 			if (cpuidle_idle_call())
-				pm_idle();
+				x86_idle();
 
 			rcu_idle_exit();
 			start_critical_timings();
@@ -375,7 +369,6 @@ void cpu_idle(void)
  */
 void default_idle(void)
 {
-	trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
 	trace_cpu_idle_rcuidle(1, smp_processor_id());
 	current_thread_info()->status &= ~TS_POLLING;
 	/*
@@ -389,21 +382,22 @@ void default_idle(void)
 	else
 		local_irq_enable();
 	current_thread_info()->status |= TS_POLLING;
-	trace_power_end_rcuidle(smp_processor_id());
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 #ifdef CONFIG_APM_MODULE
 EXPORT_SYMBOL(default_idle);
 #endif
 
-bool set_pm_idle_to_default(void)
+#ifdef CONFIG_XEN
+bool xen_set_default_idle(void)
 {
-	bool ret = !!pm_idle;
+	bool ret = !!x86_idle;
 
-	pm_idle = default_idle;
+	x86_idle = default_idle;
 
 	return ret;
 }
+#endif
 void stop_this_cpu(void *dummy)
 {
 	local_irq_disable();
@@ -413,31 +407,8 @@ void stop_this_cpu(void *dummy)
 	set_cpu_online(smp_processor_id(), false);
 	disable_local_APIC();
 
-	for (;;) {
-		if (hlt_works(smp_processor_id()))
-			halt();
-	}
-}
-
-/* Default MONITOR/MWAIT with no hints, used for default C1 state */
-static void mwait_idle(void)
-{
-	if (!need_resched()) {
-		trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
-		trace_cpu_idle_rcuidle(1, smp_processor_id());
-		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
-			clflush((void *)&current_thread_info()->flags);
-
-		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		smp_mb();
-		if (!need_resched())
-			__sti_mwait(0, 0);
-		else
-			local_irq_enable();
-		trace_power_end_rcuidle(smp_processor_id());
-		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
-	} else
-		local_irq_enable();
+	for (;;)
+		halt();
 }
 
 /*
@@ -447,62 +418,13 @@ static void mwait_idle(void)
  */
 static void poll_idle(void)
 {
-	trace_power_start_rcuidle(POWER_CSTATE, 0, smp_processor_id());
 	trace_cpu_idle_rcuidle(0, smp_processor_id());
 	local_irq_enable();
 	while (!need_resched())
 		cpu_relax();
-	trace_power_end_rcuidle(smp_processor_id());
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
-/*
- * mwait selection logic:
- *
- * It depends on the CPU. For AMD CPUs that support MWAIT this is
- * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings
- * then depend on a clock divisor and current Pstate of the core. If
- * all cores of a processor are in halt state (C1) the processor can
- * enter the C1E (C1 enhanced) state. If mwait is used this will never
- * happen.
- *
- * idle=mwait overrides this decision and forces the usage of mwait.
- */
-
-#define MWAIT_INFO			0x05
-#define MWAIT_ECX_EXTENDED_INFO		0x01
-#define MWAIT_EDX_C1			0xf0
-
-int mwait_usable(const struct cpuinfo_x86 *c)
-{
-	u32 eax, ebx, ecx, edx;
-
-	/* Use mwait if idle=mwait boot option is given */
-	if (boot_option_idle_override == IDLE_FORCE_MWAIT)
-		return 1;
-
-	/*
-	 * Any idle= boot option other than idle=mwait means that we must not
-	 * use mwait. Eg: idle=halt or idle=poll or idle=nomwait
-	 */
-	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
-		return 0;
-
-	if (c->cpuid_level < MWAIT_INFO)
-		return 0;
-
-	cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
-	/* Check, whether EDX has extended info about MWAIT */
-	if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
-		return 1;
-
-	/*
-	 * edx enumeratios MONITOR/MWAIT extensions. Check, whether
-	 * C1 supports MWAIT
-	 */
-	return (edx & MWAIT_EDX_C1);
-}
-
 bool amd_e400_c1e_detected;
 EXPORT_SYMBOL(amd_e400_c1e_detected);
 
@@ -567,31 +489,24 @@ static void amd_e400_idle(void)
 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
-	if (pm_idle == poll_idle && smp_num_siblings > 1) {
+	if (x86_idle == poll_idle && smp_num_siblings > 1)
 		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
-	}
 #endif
-	if (pm_idle)
+	if (x86_idle)
 		return;
 
-	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
-		/*
-		 * One CPU supports mwait => All CPUs supports mwait
-		 */
-		pr_info("using mwait in idle threads\n");
-		pm_idle = mwait_idle;
-	} else if (cpu_has_amd_erratum(amd_erratum_400)) {
+	if (cpu_has_amd_erratum(amd_erratum_400)) {
 		/* E400: APIC timer interrupt does not wake up CPU from C1e */
 		pr_info("using AMD E400 aware idle routine\n");
-		pm_idle = amd_e400_idle;
+		x86_idle = amd_e400_idle;
 	} else
-		pm_idle = default_idle;
+		x86_idle = default_idle;
 }
 
 void __init init_amd_e400_c1e_mask(void)
 {
 	/* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
-	if (pm_idle == amd_e400_idle)
+	if (x86_idle == amd_e400_idle)
 		zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
 }
 
@@ -602,11 +517,8 @@ static int __init idle_setup(char *str)
 
 	if (!strcmp(str, "poll")) {
 		pr_info("using polling idle threads\n");
-		pm_idle = poll_idle;
+		x86_idle = poll_idle;
 		boot_option_idle_override = IDLE_POLL;
-	} else if (!strcmp(str, "mwait")) {
-		boot_option_idle_override = IDLE_FORCE_MWAIT;
-		WARN_ONCE(1, "\"idle=mwait\" will be removed in 2012\n");
 	} else if (!strcmp(str, "halt")) {
 		/*
 		 * When the boot option of idle=halt is added, halt is
@@ -615,7 +527,7 @@ static int __init idle_setup(char *str)
 		 * To continue to load the CPU idle driver, don't touch
 		 * the boot_option_idle_override.
 		 */
-		pm_idle = default_idle;
+		x86_idle = default_idle;
 		boot_option_idle_override = IDLE_HALT;
 	} else if (!strcmp(str, "nomwait")) {
 		/*
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ed0fe385289d..a6ceaedc396a 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1369,7 +1369,7 @@ static inline void mwait_play_dead(void)
 	void *mwait_ptr;
 	struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);
 
-	if (!(this_cpu_has(X86_FEATURE_MWAIT) && mwait_usable(c)))
+	if (!this_cpu_has(X86_FEATURE_MWAIT))
 		return;
 	if (!this_cpu_has(X86_FEATURE_CLFLSH))
 		return;
diff --git a/arch/x86/platform/olpc/olpc-xo15-sci.c b/arch/x86/platform/olpc/olpc-xo15-sci.c
index 2fdca25905ae..fef7d0ba7e3a 100644
--- a/arch/x86/platform/olpc/olpc-xo15-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo15-sci.c
@@ -195,7 +195,7 @@ err_sysfs:
 	return r;
 }
 
-static int xo15_sci_remove(struct acpi_device *device, int type)
+static int xo15_sci_remove(struct acpi_device *device)
 {
 	acpi_disable_gpe(NULL, xo15_sci_gpe);
 	acpi_remove_gpe_handler(NULL, xo15_sci_gpe, xo15_sci_gpe_handler);
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 8971a26d21ab..94eac5c85cdc 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -556,12 +556,9 @@ void __init xen_arch_setup(void)
 	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);
 
 	/* Set up idle, making sure it calls safe_halt() pvop */
-#ifdef CONFIG_X86_32
-	boot_cpu_data.hlt_works_ok = 1;
-#endif
 	disable_cpuidle();
 	disable_cpufreq();
-	WARN_ON(set_pm_idle_to_default());
+	WARN_ON(xen_set_default_idle());
 	fiddle_vdso();
 #ifdef CONFIG_NUMA
 	numa_off = 1;