author		Linus Torvalds <torvalds@linux-foundation.org>	2013-04-30 10:50:17 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-04-30 10:50:17 -0400
commit		8700c95adb033843fc163d112b9d21d4fda78018 (patch)
tree		7bb9a37b8fe6328f63a61d88063c556346001098 /arch
parent		16fa94b532b1958f508e07eca1a9256351241fbc (diff)
parent		d190e8195b90bc1e65c494fe08e54e9e581bfd16 (diff)
Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull SMP/hotplug changes from Ingo Molnar:
"This is a pretty large, multi-arch series unifying and generalizing
the various disjunct pieces of idle routines that architectures have
historically copied from each other and have grown in random, wildly
inconsistent and sometimes buggy directions:
101 files changed, 455 insertions(+), 1328 deletions(-)
this went through a number of review and test iterations before it was
committed, it was tested on various architectures, was exposed to
linux-next for quite some time - nevertheless it might cause problems
on architectures that don't read the mailing lists and don't regularly
test linux-next.
This cat herding excercise was motivated by the -rt kernel, and was
brought to you by Thomas "the Whip" Gleixner."
* 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (40 commits)
idle: Remove GENERIC_IDLE_LOOP config switch
um: Use generic idle loop
ia64: Make sure interrupts enabled when we "safe_halt()"
sparc: Use generic idle loop
idle: Remove unused ARCH_HAS_DEFAULT_IDLE
bfin: Fix typo in arch_cpu_idle()
xtensa: Use generic idle loop
x86: Use generic idle loop
unicore: Use generic idle loop
tile: Use generic idle loop
tile: Enter idle with preemption disabled
sh: Use generic idle loop
score: Use generic idle loop
s390: Use generic idle loop
powerpc: Use generic idle loop
parisc: Use generic idle loop
openrisc: Use generic idle loop
mn10300: Use generic idle loop
mips: Use generic idle loop
microblaze: Use generic idle loop
...
Diffstat (limited to 'arch')
94 files changed, 288 insertions(+), 1322 deletions(-)
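Most of the per-architecture diffs below follow one pattern: delete the private cpu_idle() loop and override only the hooks the core loop calls. The core provides weak no-op defaults, so an architecture typically supplies just arch_cpu_idle(). A sketch of those defaults (simplified from the core added by this series):

```c
/* Hook defaults in the generic idle core; an arch overrides only
 * what it needs. Simplified sketch, not the verbatim kernel code. */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }

void __weak arch_cpu_idle(void)
{
	/* fallback: force polling, and keep the contract of
	 * returning with interrupts enabled */
	cpu_idle_force_poll = 1;
	local_irq_enable();
}
```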
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
index 1f8c72959fb6..52cd2a4a3ff4 100644
--- a/arch/alpha/include/asm/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
@@ -95,8 +95,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
 #define TS_POLLING	0x0010	/* idle task polling need_resched,
				   skip sending interrupt */
 
-#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
-
 #ifndef __ASSEMBLY__
 #define HAVE_SET_RESTORE_SIGMASK	1
 static inline void set_restore_sigmask(void)
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 63d27fb9b023..a3fd8a29ccac 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -46,25 +46,6 @@
 void (*pm_power_off)(void) = machine_power_off;
 EXPORT_SYMBOL(pm_power_off);
 
-void
-cpu_idle(void)
-{
-	current_thread_info()->status |= TS_POLLING;
-
-	while (1) {
-		/* FIXME -- EV6 and LCA45 know how to power down
-		   the CPU. */
-
-		rcu_idle_enter();
-		while (!need_resched())
-			cpu_relax();
-
-		rcu_idle_exit();
-		schedule_preempt_disabled();
-	}
-}
-
-
 struct halt_info {
 	int mode;
 	char *restart_cmd;
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 9603bc234b47..7b60834fb4b2 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -167,8 +167,7 @@ smp_callin(void)
 	       cpuid, current, current->active_mm));
 
 	preempt_disable();
-	/* Do nothing. */
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 }
 
 /* Wait until hwrpb->txrdy is clear for cpu. Return -1 on timeout. */
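The smp.c hunks throughout this series are all one-liners of the same shape: the secondary-CPU entry point ends by calling into the generic loop. An illustrative skeleton (function name and setup steps vary per architecture):

```c
/* Common shape of a secondary-CPU entry point after this series;
 * illustrative only, per-arch details differ. */
void __cpuinit secondary_start_kernel(void)
{
	/* ... per-arch MMU, timer, and IRQ setup ... */
	preempt_disable();
	cpu_startup_entry(CPUHP_ONLINE);	/* replaces cpu_idle(); never returns */
}
```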
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 0a7531d99294..cad66851e0c4 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -41,37 +41,12 @@ SYSCALL_DEFINE0(arc_gettls)
 	return task_thread_info(current)->thr_ptr;
 }
 
-static inline void arch_idle(void)
+void arch_cpu_idle(void)
 {
 	/* sleep, but enable all interrupts before committing */
 	__asm__("sleep 0x3");
 }
 
-void cpu_idle(void)
-{
-	/* Since we SLEEP in idle loop, TIF_POLLING_NRFLAG can't be set */
-
-	/* endless idle loop with no priority at all */
-	while (1) {
-		tick_nohz_idle_enter();
-		rcu_idle_enter();
-
-doze:
-		local_irq_disable();
-		if (!need_resched()) {
-			arch_idle();
-			goto doze;
-		} else {
-			local_irq_enable();
-		}
-
-		rcu_idle_exit();
-		tick_nohz_idle_exit();
-
-		schedule_preempt_disabled();
-	}
-}
-
 asmlinkage void ret_from_fork(void);
 
 /* Layout of Child kernel mode stack as setup at the end of this function is
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 3af3e06dcf02..5c7fd603d216 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -141,7 +141,7 @@ void __cpuinit start_kernel_secondary(void)
 
 	local_irq_enable();
 	preempt_disable();
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 }
 
 /*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index bbddefea77bb..a39e3214ea3d 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -15,6 +15,7 @@ config ARM
 	select GENERIC_IRQ_SHOW
 	select GENERIC_PCI_IOMAP
 	select GENERIC_SMP_IDLE_THREAD
+	select GENERIC_IDLE_POLL_SETUP
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
 	select HARDIRQS_SW_RESEND
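Selecting GENERIC_IDLE_POLL_SETUP replaces ARM's private "hlt"/"nohlt" boot parameters (whose implementation is deleted from arch/arm/kernel/process.c below) with generic ones in the core. Approximately, and shown for orientation only:

```c
/* Approximate core implementation behind GENERIC_IDLE_POLL_SETUP;
 * a simplified sketch of the generic boot-parameter handling. */
#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;	/* "nohlt": never enter low-power halt */
	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;	/* "hlt": allow halting again */
	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif
```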
diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h
index 5a85f148b607..21a23e378bbe 100644
--- a/arch/arm/include/asm/system_misc.h
+++ b/arch/arm/include/asm/system_misc.h
@@ -21,9 +21,6 @@ extern void (*arm_pm_idle)(void);
 
 extern unsigned int user_debug;
 
-extern void disable_hlt(void);
-extern void enable_hlt(void);
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_ARM_SYSTEM_MISC_H */
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 047d3e40e470..c9a5e2ce8aa9 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -57,38 +57,6 @@ static const char *isa_modes[] = {
   "ARM" , "Thumb" , "Jazelle", "ThumbEE"
 };
 
-static volatile int hlt_counter;
-
-void disable_hlt(void)
-{
-	hlt_counter++;
-}
-
-EXPORT_SYMBOL(disable_hlt);
-
-void enable_hlt(void)
-{
-	hlt_counter--;
-	BUG_ON(hlt_counter < 0);
-}
-
-EXPORT_SYMBOL(enable_hlt);
-
-static int __init nohlt_setup(char *__unused)
-{
-	hlt_counter = 1;
-	return 1;
-}
-
-static int __init hlt_setup(char *__unused)
-{
-	hlt_counter = 0;
-	return 1;
-}
-
-__setup("nohlt", nohlt_setup);
-__setup("hlt", hlt_setup);
-
 extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
 typedef void (*phys_reset_t)(unsigned long);
 
@@ -172,54 +140,38 @@ static void default_idle(void)
 	local_irq_enable();
 }
 
-/*
- * The idle thread.
- * We always respect 'hlt_counter' to prevent low power idle.
- */
-void cpu_idle(void)
+void arch_cpu_idle_prepare(void)
 {
 	local_fiq_enable();
+}
 
-	/* endless idle loop with no priority at all */
-	while (1) {
-		tick_nohz_idle_enter();
-		rcu_idle_enter();
-		ledtrig_cpu(CPU_LED_IDLE_START);
-		while (!need_resched()) {
-#ifdef CONFIG_HOTPLUG_CPU
-			if (cpu_is_offline(smp_processor_id()))
-				cpu_die();
+void arch_cpu_idle_enter(void)
+{
+	ledtrig_cpu(CPU_LED_IDLE_START);
+#ifdef CONFIG_PL310_ERRATA_769419
+	wmb();
 #endif
+}
 
-			/*
-			 * We need to disable interrupts here
-			 * to ensure we don't miss a wakeup call.
-			 */
-			local_irq_disable();
-#ifdef CONFIG_PL310_ERRATA_769419
-			wmb();
+void arch_cpu_idle_exit(void)
+{
+	ledtrig_cpu(CPU_LED_IDLE_END);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void arch_cpu_idle_dead(void)
+{
+	cpu_die();
+}
 #endif
-			if (hlt_counter) {
-				local_irq_enable();
-				cpu_relax();
-			} else if (!need_resched()) {
-				stop_critical_timings();
-				if (cpuidle_idle_call())
-					default_idle();
-				start_critical_timings();
-				/*
-				 * default_idle functions must always
-				 * return with IRQs enabled.
-				 */
-				WARN_ON(irqs_disabled());
-			} else
-				local_irq_enable();
-		}
-		ledtrig_cpu(CPU_LED_IDLE_END);
-		rcu_idle_exit();
-		tick_nohz_idle_exit();
-		schedule_preempt_disabled();
-	}
+
+/*
+ * Called from the core idle loop.
+ */
+void arch_cpu_idle(void)
+{
+	if (cpuidle_idle_call())
+		default_idle();
 }
 
 static char reboot_mode = 'h';
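The disable_hlt()/enable_hlt() pair deleted above is replaced throughout the series by a generic, counted control, so independent users (the OMAP hwmod hunks, perfmon sessions, and suspend paths below) simply pair true/false calls and can nest. A sketch of the core side, simplified from the generic idle code:

```c
/* Sketch of the generic replacement for disable_hlt()/enable_hlt().
 * Counter semantics let users nest: low-power halting resumes only
 * when the last user calls cpu_idle_poll_ctrl(false). */
static int cpu_idle_force_poll;

void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}
```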
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 1f2ccccaf009..4619177bcfe6 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -336,7 +336,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	/*
 	 * OK, it's off to the idle thread for us
 	 */
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
diff --git a/arch/arm/mach-gemini/idle.c b/arch/arm/mach-gemini/idle.c
index 92bbd6bb600a..87dff4f5059e 100644
--- a/arch/arm/mach-gemini/idle.c
+++ b/arch/arm/mach-gemini/idle.c
@@ -13,9 +13,11 @@ static void gemini_idle(void)
 	 * will never wakeup... Acctualy it is not very good to enable
 	 * interrupts first since scheduler can miss a tick, but there is
 	 * no other way around this. Platforms that needs it for power saving
-	 * should call enable_hlt() in init code, since by default it is
+	 * should enable it in init code, since by default it is
 	 * disabled.
 	 */
+
+	/* FIXME: Enabling interrupts here is racy! */
 	local_irq_enable();
 	cpu_do_idle();
 }
diff --git a/arch/arm/mach-gemini/irq.c b/arch/arm/mach-gemini/irq.c
index 020852d3bdd8..6d8f6d1669ff 100644
--- a/arch/arm/mach-gemini/irq.c
+++ b/arch/arm/mach-gemini/irq.c
@@ -15,6 +15,8 @@
 #include <linux/stddef.h>
 #include <linux/list.h>
 #include <linux/sched.h>
+#include <linux/cpu.h>
+
 #include <asm/irq.h>
 #include <asm/mach/irq.h>
 #include <asm/system_misc.h>
@@ -77,7 +79,7 @@ void __init gemini_init_irq(void)
 	 * Disable the idle handler by default since it is buggy
 	 * For more info see arch/arm/mach-gemini/idle.c
 	 */
-	disable_hlt();
+	cpu_idle_poll_ctrl(true);
 
 	request_resource(&iomem_resource, &irq_resource);
 
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 1dbeb7c99d58..6600cff6bd92 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -29,6 +29,7 @@
 #include <linux/io.h>
 #include <linux/export.h>
 #include <linux/gpio.h>
+#include <linux/cpu.h>
 
 #include <mach/udc.h>
 #include <mach/hardware.h>
@@ -239,7 +240,7 @@ void __init ixp4xx_init_irq(void)
 	 * ixp4xx does not implement the XScale PWRMODE register
 	 * so it must not call cpu_do_idle().
 	 */
-	disable_hlt();
+	cpu_idle_poll_ctrl(true);
 
 	/* Route all sources to IRQ instead of FIQ */
 	*IXP4XX_ICLR = 0x0;
diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
index 7a7690ab6cb8..db37f49da5ac 100644
--- a/arch/arm/mach-omap1/pm.c
+++ b/arch/arm/mach-omap1/pm.c
@@ -43,6 +43,7 @@
 #include <linux/module.h>
 #include <linux/io.h>
 #include <linux/atomic.h>
+#include <linux/cpu.h>
 
 #include <asm/fncpy.h>
 #include <asm/system_misc.h>
@@ -584,8 +585,7 @@ static void omap_pm_init_proc(void)
 static int omap_pm_prepare(void)
 {
 	/* We cannot sleep in idle until we have resumed */
-	disable_hlt();
-
+	cpu_idle_poll_ctrl(true);
 	return 0;
 }
 
@@ -621,7 +621,7 @@ static int omap_pm_enter(suspend_state_t state)
 
 static void omap_pm_finish(void)
 {
-	enable_hlt();
+	cpu_idle_poll_ctrl(false);
 }
 
 
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index a202a4785104..e512253601c8 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -138,6 +138,7 @@
 #include <linux/spinlock.h>
 #include <linux/slab.h>
 #include <linux/bootmem.h>
+#include <linux/cpu.h>
 
 #include <asm/system_misc.h>
 
@@ -2157,7 +2158,7 @@ static int _enable(struct omap_hwmod *oh)
 	if (soc_ops.enable_module)
 		soc_ops.enable_module(oh);
 	if (oh->flags & HWMOD_BLOCK_WFI)
-		disable_hlt();
+		cpu_idle_poll_ctrl(true);
 
 	if (soc_ops.update_context_lost)
 		soc_ops.update_context_lost(oh);
@@ -2221,7 +2222,7 @@ static int _idle(struct omap_hwmod *oh)
 	_del_initiator_dep(oh, mpu_oh);
 
 	if (oh->flags & HWMOD_BLOCK_WFI)
-		enable_hlt();
+		cpu_idle_poll_ctrl(false);
 	if (soc_ops.disable_module)
 		soc_ops.disable_module(oh);
 
@@ -2331,7 +2332,7 @@ static int _shutdown(struct omap_hwmod *oh)
 	_del_initiator_dep(oh, mpu_oh);
 	/* XXX what about the other system initiators here? dma, dsp */
 	if (oh->flags & HWMOD_BLOCK_WFI)
-		enable_hlt();
+		cpu_idle_poll_ctrl(false);
 	if (soc_ops.disable_module)
 		soc_ops.disable_module(oh);
 	_disable_clocks(oh);
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index 673a4c1d1d76..dec553349ae2 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -218,7 +218,7 @@ static int omap_pm_enter(suspend_state_t suspend_state)
 
 static int omap_pm_begin(suspend_state_t state)
 {
-	disable_hlt();
+	cpu_idle_poll_ctrl(true);
 	if (cpu_is_omap34xx())
 		omap_prcm_irq_prepare();
 	return 0;
@@ -226,8 +226,7 @@ static int omap_pm_begin(suspend_state_t state)
 
 static void omap_pm_end(void)
 {
-	enable_hlt();
-	return;
+	cpu_idle_poll_ctrl(false);
 }
 
 static void omap_pm_finish(void)
diff --git a/arch/arm/mach-orion5x/board-dt.c b/arch/arm/mach-orion5x/board-dt.c
index 35a8014529ca..94fbb815680c 100644
--- a/arch/arm/mach-orion5x/board-dt.c
+++ b/arch/arm/mach-orion5x/board-dt.c
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
+#include <linux/cpu.h>
 #include <asm/system_misc.h>
 #include <asm/mach/arch.h>
 #include <mach/orion5x.h>
@@ -52,7 +53,7 @@ static void __init orion5x_dt_init(void)
 	 */
 	if (dev == MV88F5281_DEV_ID && rev == MV88F5281_REV_D0) {
 		printk(KERN_INFO "Orion: Applying 5281 D0 WFI workaround.\n");
-		disable_hlt();
+		cpu_idle_poll_ctrl(true);
 	}
 
 	if (of_machine_is_compatible("lacie,ethernet-disk-mini-v2"))
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index d068f1431c40..ad71c8a03ffd 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -293,7 +293,7 @@ void __init orion5x_init(void)
 	 */
 	if (dev == MV88F5281_DEV_ID && rev == MV88F5281_REV_D0) {
 		printk(KERN_INFO "Orion: Applying 5281 D0 WFI workaround.\n");
-		disable_hlt();
+		cpu_idle_poll_ctrl(true);
 	}
 
 	/*
diff --git a/arch/arm/mach-shark/core.c b/arch/arm/mach-shark/core.c
index b63dec848195..153555724988 100644
--- a/arch/arm/mach-shark/core.c
+++ b/arch/arm/mach-shark/core.c
@@ -10,6 +10,7 @@
 #include <linux/sched.h>
 #include <linux/serial_8250.h>
 #include <linux/io.h>
+#include <linux/cpu.h>
 
 #include <asm/setup.h>
 #include <asm/mach-types.h>
@@ -130,7 +131,7 @@ static void __init shark_timer_init(void)
 
 static void shark_init_early(void)
 {
-	disable_hlt();
+	cpu_idle_poll_ctrl(true);
 }
 
 MACHINE_START(SHARK, "Shark")
diff --git a/arch/arm/mach-shmobile/suspend.c b/arch/arm/mach-shmobile/suspend.c
index 47d83f7a70b6..5d92b5dd486b 100644
--- a/arch/arm/mach-shmobile/suspend.c
+++ b/arch/arm/mach-shmobile/suspend.c
@@ -12,6 +12,8 @@
 #include <linux/suspend.h>
 #include <linux/module.h>
 #include <linux/err.h>
+#include <linux/cpu.h>
+
 #include <asm/io.h>
 #include <asm/system_misc.h>
 
@@ -23,13 +25,13 @@ static int shmobile_suspend_default_enter(suspend_state_t suspend_state)
 
 static int shmobile_suspend_begin(suspend_state_t state)
 {
-	disable_hlt();
+	cpu_idle_poll_ctrl(true);
 	return 0;
 }
 
 static void shmobile_suspend_end(void)
 {
-	enable_hlt();
+	cpu_idle_poll_ctrl(false);
 }
 
 struct platform_suspend_ops shmobile_suspend_ops = {
diff --git a/arch/arm/mach-w90x900/dev.c b/arch/arm/mach-w90x900/dev.c
index 7abdb9645c5b..e65a80a1ac75 100644
--- a/arch/arm/mach-w90x900/dev.c
+++ b/arch/arm/mach-w90x900/dev.c
@@ -19,6 +19,7 @@
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/cpu.h>
 
 #include <linux/mtd/physmap.h>
 #include <linux/mtd/mtd.h>
@@ -531,7 +532,7 @@ static struct platform_device *nuc900_public_dev[] __initdata = {
 
 void __init nuc900_board_init(struct platform_device **device, int size)
 {
-	disable_hlt();
+	cpu_idle_poll_ctrl(true);
 	platform_add_devices(device, size);
 	platform_add_devices(nuc900_public_dev, ARRAY_SIZE(nuc900_public_dev));
 	spi_register_board_info(nuc900_spi_board_info,
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 0337cdb0667b..83a0ad5936a5 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -84,11 +84,15 @@ EXPORT_SYMBOL_GPL(pm_power_off);
 void (*pm_restart)(const char *cmd);
 EXPORT_SYMBOL_GPL(pm_restart);
 
+void arch_cpu_idle_prepare(void)
+{
+	local_fiq_enable();
+}
 
 /*
  * This is our default idle handler.
  */
-static void default_idle(void)
+void arch_cpu_idle(void)
 {
 	/*
 	 * This should do all the clock switching and wait for interrupt
@@ -98,43 +102,6 @@ static void default_idle(void)
 	local_irq_enable();
 }
 
-/*
- * The idle thread.
- * We always respect 'hlt_counter' to prevent low power idle.
- */
-void cpu_idle(void)
-{
-	local_fiq_enable();
-
-	/* endless idle loop with no priority at all */
-	while (1) {
-		tick_nohz_idle_enter();
-		rcu_idle_enter();
-		while (!need_resched()) {
-			/*
-			 * We need to disable interrupts here to ensure
-			 * we don't miss a wakeup call.
-			 */
-			local_irq_disable();
-			if (!need_resched()) {
-				stop_critical_timings();
-				default_idle();
-				start_critical_timings();
-				/*
-				 * default_idle functions should always return
-				 * with IRQs enabled.
-				 */
-				WARN_ON(irqs_disabled());
-			} else {
-				local_irq_enable();
-			}
-		}
-		rcu_idle_exit();
-		tick_nohz_idle_exit();
-		schedule_preempt_disabled();
-	}
-}
-
 void machine_shutdown(void)
 {
 #ifdef CONFIG_SMP
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index bdd34597254b..261445c4666f 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -216,7 +216,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	/*
 	 * OK, it's off to the idle thread for us
 	 */
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
index fd78f58ea79a..073c3c2fa521 100644
--- a/arch/avr32/kernel/process.c
+++ b/arch/avr32/kernel/process.c
@@ -30,18 +30,9 @@ EXPORT_SYMBOL(pm_power_off);
  * This file handles the architecture-dependent parts of process handling..
  */
 
-void cpu_idle(void)
+void arch_cpu_idle(void)
 {
-	/* endless idle loop with no priority at all */
-	while (1) {
-		tick_nohz_idle_enter();
-		rcu_idle_enter();
-		while (!need_resched())
-			cpu_idle_sleep();
-		rcu_idle_exit();
-		tick_nohz_idle_exit();
-		schedule_preempt_disabled();
-	}
+	cpu_enter_idle();
 }
 
 void machine_halt(void)
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index 05ad29112ff4..869a1c6ffeee 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -12,6 +12,7 @@
 #include <linux/irq.h>
 #include <linux/kernel.h>
 #include <linux/time.h>
+#include <linux/cpu.h>
 
 #include <asm/sysreg.h>
 
@@ -87,13 +88,17 @@ static void comparator_mode(enum clock_event_mode mode,
 		pr_debug("%s: start\n", evdev->name);
 		/* FALLTHROUGH */
 	case CLOCK_EVT_MODE_RESUME:
-		cpu_disable_idle_sleep();
+		/*
+		 * If we're using the COUNT and COMPARE registers we
+		 * need to force idle poll.
+		 */
+		cpu_idle_poll_ctrl(true);
 		break;
 	case CLOCK_EVT_MODE_UNUSED:
 	case CLOCK_EVT_MODE_SHUTDOWN:
 		sysreg_write(COMPARE, 0);
 		pr_debug("%s: stop\n", evdev->name);
-		cpu_enable_idle_sleep();
+		cpu_idle_poll_ctrl(false);
 		break;
 	default:
 		BUG();
diff --git a/arch/avr32/mach-at32ap/include/mach/pm.h b/arch/avr32/mach-at32ap/include/mach/pm.h
index 979b355b77b6..f29ff2cd23d3 100644
--- a/arch/avr32/mach-at32ap/include/mach/pm.h
+++ b/arch/avr32/mach-at32ap/include/mach/pm.h
@@ -21,30 +21,6 @@
 extern void cpu_enter_idle(void);
 extern void cpu_enter_standby(unsigned long sdramc_base);
 
-extern bool disable_idle_sleep;
-
-static inline void cpu_disable_idle_sleep(void)
-{
-	disable_idle_sleep = true;
-}
-
-static inline void cpu_enable_idle_sleep(void)
-{
-	disable_idle_sleep = false;
-}
-
-static inline void cpu_idle_sleep(void)
-{
-	/*
-	 * If we're using the COUNT and COMPARE registers for
-	 * timekeeping, we can't use the IDLE state.
-	 */
-	if (disable_idle_sleep)
-		cpu_relax();
-	else
-		cpu_enter_idle();
-}
-
 void intc_set_suspend_handler(unsigned long offset);
 #endif
 
diff --git a/arch/avr32/mach-at32ap/pm-at32ap700x.S b/arch/avr32/mach-at32ap/pm-at32ap700x.S
index f868f4ce761b..1c8e4e6bff03 100644
--- a/arch/avr32/mach-at32ap/pm-at32ap700x.S
+++ b/arch/avr32/mach-at32ap/pm-at32ap700x.S
@@ -18,13 +18,6 @@
 /* Same as 0xfff00000 but fits in a 21 bit signed immediate */
 #define PM_BASE	-0x100000
 
-	.section .bss, "wa", @nobits
-	.global	disable_idle_sleep
-	.type	disable_idle_sleep, @object
-disable_idle_sleep:
-	.int	4
-	.size	disable_idle_sleep, . - disable_idle_sleep
-
 /* Keep this close to the irq handlers */
 	.section .irq.text, "ax", @progbits
 
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 9782c0329c14..4aa5545c4fde 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -46,15 +46,14 @@ EXPORT_SYMBOL(pm_power_off);
  * The idle loop on BFIN
  */
 #ifdef CONFIG_IDLE_L1
-static void default_idle(void)__attribute__((l1_text));
-void cpu_idle(void)__attribute__((l1_text));
+void arch_cpu_idle(void)__attribute__((l1_text));
 #endif
 
 /*
  * This is our default idle handler. We need to disable
  * interrupts here to ensure we don't miss a wakeup call.
  */
-static void default_idle(void)
+void arch_cpu_idle(void)
 {
 #ifdef CONFIG_IPIPE
 	ipipe_suspend_domain();
@@ -66,31 +65,12 @@ static void default_idle(void)
 	hard_local_irq_enable();
 }
 
-/*
- * The idle thread. We try to conserve power, while trying to keep
- * overall latency low. The architecture specific idle is passed
- * a value to indicate the level of "idleness" of the system.
- */
-void cpu_idle(void)
-{
-	/* endless idle loop with no priority at all */
-	while (1) {
-
 #ifdef CONFIG_HOTPLUG_CPU
-		if (cpu_is_offline(smp_processor_id()))
-			cpu_die();
-#endif
-		tick_nohz_idle_enter();
-		rcu_idle_enter();
-		while (!need_resched())
-			default_idle();
-		rcu_idle_exit();
-		tick_nohz_idle_exit();
-		preempt_enable_no_resched();
-		schedule();
-		preempt_disable();
-	}
+void arch_cpu_idle_dead(void)
+{
+	cpu_die();
 }
+#endif
 
 /*
  * Do necessary setup to start up a newly executed thread.
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index bb61ae4986e4..1bc2ce6f3c94 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -335,7 +335,7 @@ void __cpuinit secondary_start_kernel(void)
 	 */
 	calibrate_delay();
 
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 }
 
 void __init smp_prepare_boot_cpu(void)
diff --git a/arch/c6x/kernel/process.c b/arch/c6x/kernel/process.c
index 6434df476f77..57d2ea8d1977 100644
--- a/arch/c6x/kernel/process.c
+++ b/arch/c6x/kernel/process.c
@@ -33,7 +33,7 @@ extern asmlinkage void ret_from_kernel_thread(void);
 void (*pm_power_off)(void);
 EXPORT_SYMBOL(pm_power_off);
 
-static void c6x_idle(void)
+void arch_cpu_idle(void)
 {
 	unsigned long tmp;
 
@@ -49,32 +49,6 @@ static void c6x_idle(void)
 		: "=b"(tmp));
 }
 
-/*
- * The idle loop for C64x
- */
-void cpu_idle(void)
-{
-	/* endless idle loop with no priority at all */
-	while (1) {
-		tick_nohz_idle_enter();
-		rcu_idle_enter();
-		while (1) {
-			local_irq_disable();
-			if (need_resched()) {
-				local_irq_enable();
-				break;
-			}
-			c6x_idle(); /* enables local irqs */
-		}
-		rcu_idle_exit();
-		tick_nohz_idle_exit();
-
-		preempt_enable_no_resched();
-		schedule();
-		preempt_disable();
-	}
-}
-
 static void halt_loop(void)
 {
 	printk(KERN_EMERG "System Halted, OK to turn off power\n");
diff --git a/arch/cris/arch-v10/kernel/process.c b/arch/cris/arch-v10/kernel/process.c
index b1018750cffb..2ba23c13df68 100644
--- a/arch/cris/arch-v10/kernel/process.c
+++ b/arch/cris/arch-v10/kernel/process.c
@@ -30,8 +30,9 @@ void etrax_gpio_wake_up_check(void); /* drivers/gpio.c */
 void default_idle(void)
 {
 #ifdef CONFIG_ETRAX_GPIO
 	etrax_gpio_wake_up_check();
 #endif
+	local_irq_enable();
 }
 
 /*
diff --git a/arch/cris/arch-v32/kernel/process.c b/arch/cris/arch-v32/kernel/process.c
index 2b23ef0e4452..57451faa9b20 100644
--- a/arch/cris/arch-v32/kernel/process.c
+++ b/arch/cris/arch-v32/kernel/process.c
@@ -20,18 +20,12 @@
 
 extern void stop_watchdog(void);
 
-extern int cris_hlt_counter;
-
 /* We use this if we don't have any better idle routine. */
 void default_idle(void)
 {
-	local_irq_disable();
-	if (!need_resched() && !cris_hlt_counter) {
-		/* Halt until exception. */
-		__asm__ volatile("ei    \n\t"
-				 "halt      ");
-	}
-	local_irq_enable();
+	/* Halt until exception. */
+	__asm__ volatile("ei    \n\t"
+			 "halt      ");
 }
 
 /*
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
index 04a16edd5401..cdd12028de0c 100644
--- a/arch/cris/arch-v32/kernel/smp.c
+++ b/arch/cris/arch-v32/kernel/smp.c
@@ -145,8 +145,6 @@ smp_boot_one_cpu(int cpuid, struct task_struct idle)
  * specific stuff such as the local timer and the MMU. */
 void __init smp_callin(void)
 {
-	extern void cpu_idle(void);
-
 	int cpu = cpu_now_booting;
 	reg_intr_vect_rw_mask vect_mask = {0};
 
@@ -170,7 +168,7 @@ void __init smp_callin(void)
 	local_irq_enable();
 
 	set_cpu_online(cpu, true);
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 }
 
 /* Stop execution on this CPU.*/
diff --git a/arch/cris/include/asm/processor.h b/arch/cris/include/asm/processor.h
index 675823f70c0f..c0a29b96b92b 100644
--- a/arch/cris/include/asm/processor.h
+++ b/arch/cris/include/asm/processor.h
@@ -65,13 +65,6 @@ static inline void release_thread(struct task_struct *dead_task)
 
 #define cpu_relax()	barrier()
 
-/*
- * disable hlt during certain critical i/o operations
- */
-#define HAVE_DISABLE_HLT
-void disable_hlt(void);
-void enable_hlt(void);
-
 void default_idle(void);
 
 #endif /* __ASM_CRIS_PROCESSOR_H */
diff --git a/arch/cris/kernel/process.c b/arch/cris/kernel/process.c
index 104ff4dd9b98..b78498eb079b 100644
--- a/arch/cris/kernel/process.c
+++ b/arch/cris/kernel/process.c
@@ -29,59 +29,14 @@
 
 //#define DEBUG
 
-/*
- * The hlt_counter, disable_hlt and enable_hlt is just here as a hook if
- * there would ever be a halt sequence (for power save when idle) with
- * some largish delay when halting or resuming *and* a driver that can't
- * afford that delay. The hlt_counter would then be checked before
- * executing the halt sequence, and the driver marks the unhaltable
- * region by enable_hlt/disable_hlt.
- */
-
-int cris_hlt_counter=0;
-
-void disable_hlt(void)
-{
-	cris_hlt_counter++;
-}
-
-EXPORT_SYMBOL(disable_hlt);
-
-void enable_hlt(void)
-{
-	cris_hlt_counter--;
-}
-
-EXPORT_SYMBOL(enable_hlt);
-
 extern void default_idle(void);
 
 void (*pm_power_off)(void);
 EXPORT_SYMBOL(pm_power_off);
 
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-
-void cpu_idle (void)
+void arch_cpu_idle(void)
 {
-	/* endless idle loop with no priority at all */
-	while (1) {
-		rcu_idle_enter();
-		while (!need_resched()) {
-			/*
-			 * Mark this as an RCU critical section so that
-			 * synchronize_kernel() in the unload path waits
-			 * for our completion.
-			 */
-			default_idle();
-		}
-		rcu_idle_exit();
-		schedule_preempt_disabled();
-	}
+	default_idle();
 }
 
 void hard_reset_now (void);
diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c
index 23916b2a12a2..5d40aeb7712e 100644
--- a/arch/frv/kernel/process.c
+++ b/arch/frv/kernel/process.c
@@ -59,29 +59,12 @@ static void core_sleep_idle(void)
 	mb();
 }
 
-void (*idle)(void) = core_sleep_idle;
-
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle(void)
+void arch_cpu_idle(void)
 {
-	/* endless idle loop with no priority at all */
-	while (1) {
-		rcu_idle_enter();
-		while (!need_resched()) {
-			check_pgt_cache();
-
-			if (!frv_dma_inprogress && idle)
-				idle();
-		}
-		rcu_idle_exit();
-
-		schedule_preempt_disabled();
-	}
+	if (!frv_dma_inprogress)
+		core_sleep_idle();
+	else
+		local_irq_enable();
 }
 
 void machine_restart(char * __unused)
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
index b609f63f1590..a17d2cd463d2 100644
--- a/arch/h8300/kernel/process.c
+++ b/arch/h8300/kernel/process.c
@@ -53,40 +53,13 @@ asmlinkage void ret_from_kernel_thread(void);
  * The idle loop on an H8/300..
  */
 #if !defined(CONFIG_H8300H_SIM) && !defined(CONFIG_H8S_SIM)
-static void default_idle(void)
+void arch_cpu_idle(void)
 {
-	local_irq_disable();
-	if (!need_resched()) {
-		local_irq_enable();
-		/* XXX: race here! What if need_resched() gets set now? */
-		__asm__("sleep");
-	} else
-		local_irq_enable();
-}
-#else
-static void default_idle(void)
-{
-	cpu_relax();
+	local_irq_enable();
+	/* XXX: race here! What if need_resched() gets set now? */
+	__asm__("sleep");
 }
 #endif
-void (*idle)(void) = default_idle;
-
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle(void)
-{
-	while (1) {
-		rcu_idle_enter();
-		while (!need_resched())
-			idle();
-		rcu_idle_exit();
-		schedule_preempt_disabled();
-	}
-}
 
 void machine_restart(char * __unused)
 {
diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c
index 06ae9ffcabd5..9b948c619a03 100644
--- a/arch/hexagon/kernel/process.c
+++ b/arch/hexagon/kernel/process.c
@@ -51,28 +51,11 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
  * If hardware or VM offer wait termination even though interrupts
  * are disabled.
  */
-static void default_idle(void)
+void arch_cpu_idle(void)
 {
 	__vmwait();
-}
-
-void (*idle_sleep)(void) = default_idle;
-
-void cpu_idle(void)
-{
-	while (1) {
-		tick_nohz_idle_enter();
-		local_irq_disable();
-		while (!need_resched()) {
-			idle_sleep();
-			/* interrupts wake us up, but aren't serviced */
-			local_irq_enable();	/* service interrupt */
-			local_irq_disable();
-		}
-		local_irq_enable();
-		tick_nohz_idle_exit();
-		schedule();
-	}
+	/* interrupts wake us up, but irqs are still disabled */
+	local_irq_enable();
 }
 
 /*
diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c
index 8e095dffd070..0e364ca43198 100644
--- a/arch/hexagon/kernel/smp.c
+++ b/arch/hexagon/kernel/smp.c
@@ -184,7 +184,7 @@ void __cpuinit start_secondary(void)
 
 	local_irq_enable();
 
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 }
 
 
diff --git a/arch/ia64/include/asm/irqflags.h b/arch/ia64/include/asm/irqflags.h
index 2b68d856dc78..1bf2cf2f4ab4 100644
--- a/arch/ia64/include/asm/irqflags.h
+++ b/arch/ia64/include/asm/irqflags.h
@@ -89,6 +89,7 @@ static inline bool arch_irqs_disabled(void)
 
 static inline void arch_safe_halt(void)
 {
+	arch_local_irq_enable();
 	ia64_pal_halt_light();	/* PAL_HALT_LIGHT */
 }
 
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index 020d655ed082..cade13dd0299 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -131,8 +131,6 @@ struct thread_info {
 #define TS_POLLING	1	/* true if in idle loop and not sleeping */
 #define TS_RESTORE_SIGMASK	2	/* restore signal mask in do_signal() */
 
-#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
-
 #ifndef __ASSEMBLY__
 #define HAVE_SET_RESTORE_SIGMASK	1
 static inline void set_restore_sigmask(void)
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 2eda28414abb..9ea25fce06d5 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -42,6 +42,7 @@
 #include <linux/completion.h>
 #include <linux/tracehook.h>
 #include <linux/slab.h>
+#include <linux/cpu.h>
 
 #include <asm/errno.h>
 #include <asm/intrinsics.h>
@@ -1322,8 +1323,6 @@ out:
 }
 EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
 
-extern void update_pal_halt_status(int);
-
 static int
 pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
 {
@@ -1371,9 +1370,9 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
 		cpu));
 
 	/*
-	 * disable default_idle() to go to PAL_HALT
+	 * Force idle() into poll mode
 	 */
-	update_pal_halt_status(0);
+	cpu_idle_poll_ctrl(true);
 
 	UNLOCK_PFS(flags);
 
@@ -1430,11 +1429,8 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
 		is_syswide,
 		cpu));
 
-	/*
-	 * if possible, enable default_idle() to go into PAL_HALT
-	 */
-	if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
-		update_pal_halt_status(1);
+	/* Undo forced polling. Last session reenables pal_halt */
+	cpu_idle_poll_ctrl(false);
 
 	UNLOCK_PFS(flags);
 
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 6f7dc8b7b35c..a26fc640e4ce 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -209,41 +209,13 @@ do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
 	local_irq_disable();	/* force interrupt disable */
 }
 
-static int pal_halt = 1;
-static int can_do_pal_halt = 1;
-
 static int __init nohalt_setup(char * str)
 {
-	pal_halt = can_do_pal_halt = 0;
+	cpu_idle_poll_ctrl(true);
 	return 1;
 }
 __setup("nohalt", nohalt_setup);
 
-void
-update_pal_halt_status(int status)
-{
-	can_do_pal_halt = pal_halt && status;
-}
-
-/*
- * We use this if we don't have any better idle routine..
- */
-void
-default_idle (void)
-{
-	local_irq_enable();
-	while (!need_resched()) {
-		if (can_do_pal_halt) {
-			local_irq_disable();
-			if (!need_resched()) {
-				safe_halt();
-			}
-			local_irq_enable();
-		} else
-			cpu_relax();
-	}
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 /* We don't actually take CPU down, just spin without interrupts. */
 static inline void play_dead(void)
@@ -270,47 +242,29 @@
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-void __attribute__((noreturn))
-cpu_idle (void)
+void arch_cpu_idle_dead(void)
+{
+	play_dead();
+}
+
+void arch_cpu_idle(void)
 {
 	void (*mark_idle)(int) = ia64_mark_idle;
-	int cpu = smp_processor_id();
-
-	/* endless idle loop with no priority at all */
-	while (1) {
-		rcu_idle_enter();
-		if (can_do_pal_halt) {
-			current_thread_info()->status &= ~TS_POLLING;
-			/*
-			 * TS_POLLING-cleared state must be visible before we
-			 * test NEED_RESCHED:
-			 */
-			smp_mb();
-		} else {
-			current_thread_info()->status |= TS_POLLING;
-		}
 
-		if (!need_resched()) {
 #ifdef CONFIG_SMP
 	min_xtp();
 #endif
 	rmb();
 	if (mark_idle)
 		(*mark_idle)(1);
+
+	safe_halt();
 
-			default_idle();
-			if (mark_idle)
-				(*mark_idle)(0);
+	if (mark_idle)
+		(*mark_idle)(0);
#ifdef CONFIG_SMP
 	normal_xtp();
 #endif
-		}
-		rcu_idle_exit();
-		schedule_preempt_disabled();
-		check_pgt_cache();
-		if (cpu_is_offline(cpu))
-			play_dead();
-	}
 }
 
 void
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 500f1e4d9f9d..8d87168d218d 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -455,7 +455,7 @@ start_secondary (void *unused)
 	preempt_disable();
 	smp_callin();
 
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 	return 0;
 }
 
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c
index bde899e155d3..e2d049018c3b 100644
--- a/arch/m32r/kernel/process.c
+++ b/arch/m32r/kernel/process.c
@@ -47,24 +47,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
 void (*pm_power_off)(void) = NULL;
 EXPORT_SYMBOL(pm_power_off);
 
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle (void)
-{
-	/* endless idle loop with no priority at all */
-	while (1) {
-		rcu_idle_enter();
-		while (!need_resched())
-			cpu_relax();
-		rcu_idle_exit();
-		schedule_preempt_disabled();
-	}
-}
-
 void machine_restart(char *__unused)
 {
 #if defined(CONFIG_PLAT_MAPPI3)
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index 13168a769f8f..0ac558adc605 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -432,7 +432,7 @@ int __init start_secondary(void *unused)
 	 */
 	local_flush_tlb_all();
 
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 	return 0;
 }
 
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c index d538694ad208..c55ff719fa72 100644 --- a/arch/m68k/kernel/process.c +++ b/arch/m68k/kernel/process.c | |||
@@ -51,40 +51,16 @@ unsigned long thread_saved_pc(struct task_struct *tsk) | |||
51 | return sw->retpc; | 51 | return sw->retpc; |
52 | } | 52 | } |
53 | 53 | ||
54 | /* | 54 | void arch_cpu_idle(void) |
55 | * The idle loop on an m68k.. | ||
56 | */ | ||
57 | static void default_idle(void) | ||
58 | { | 55 | { |
59 | if (!need_resched()) | ||
60 | #if defined(MACH_ATARI_ONLY) | 56 | #if defined(MACH_ATARI_ONLY) |
61 | /* block out HSYNC on the atari (falcon) */ | 57 | /* block out HSYNC on the atari (falcon) */ |
62 | __asm__("stop #0x2200" : : : "cc"); | 58 | __asm__("stop #0x2200" : : : "cc"); |
63 | #else | 59 | #else |
64 | __asm__("stop #0x2000" : : : "cc"); | 60 | __asm__("stop #0x2000" : : : "cc"); |
65 | #endif | 61 | #endif |
66 | } | 62 | } |
67 | 63 | ||
68 | void (*idle)(void) = default_idle; | ||
69 | |||
70 | /* | ||
71 | * The idle thread. There's no useful work to be | ||
72 | * done, so just try to conserve power and have a | ||
73 | * low exit latency (ie sit in a loop waiting for | ||
74 | * somebody to say that they'd like to reschedule) | ||
75 | */ | ||
76 | void cpu_idle(void) | ||
77 | { | ||
78 | /* endless idle loop with no priority at all */ | ||
79 | while (1) { | ||
80 | rcu_idle_enter(); | ||
81 | while (!need_resched()) | ||
82 | idle(); | ||
83 | rcu_idle_exit(); | ||
84 | schedule_preempt_disabled(); | ||
85 | } | ||
86 | } | ||
87 | |||
88 | void machine_restart(char * __unused) | 64 | void machine_restart(char * __unused) |
89 | { | 65 | { |
90 | if (mach_reset) | 66 | if (mach_reset) |
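m68k now supplies nothing but the machine-specific sleep instruction; the loop, the RCU bookkeeping and the rescheduling all come from the core. Note that the "stop" immediates load a status-register value with interrupts (re-)enabled, so the routine satisfies the return-with-IRQs-on contract without an explicit local_irq_enable(). Architectures that define no arch_cpu_idle() at all (parisc and score below simply delete their loops) presumably inherit a weak core default along these lines (the body is an assumption, not the committed code):

    /* Core-side fallback (sketch): any arch providing its own
     * arch_cpu_idle(), as m68k does above, overrides this. */
    void __weak arch_cpu_idle(void)
    {
            cpu_idle_force_poll = 1;        /* nothing smarter available: poll */
            local_irq_enable();
    }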
diff --git a/arch/metag/include/asm/thread_info.h b/arch/metag/include/asm/thread_info.h index 0ecd34d8b5f6..7c4a33006142 100644 --- a/arch/metag/include/asm/thread_info.h +++ b/arch/metag/include/asm/thread_info.h | |||
@@ -150,6 +150,4 @@ static inline int kstack_end(void *addr) | |||
150 | #define _TIF_WORK_MASK (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \ | 150 | #define _TIF_WORK_MASK (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \ |
151 | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP)) | 151 | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP)) |
152 | 152 | ||
153 | #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) | ||
154 | |||
155 | #endif /* _ASM_THREAD_INFO_H */ | 153 | #endif /* _ASM_THREAD_INFO_H */ |
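This tsk_is_polling() deletion repeats across the series, in both its TIF_POLLING_NRFLAG form (metag here; microblaze, mn10300, openrisc, parisc, powerpc, sh and sparc below) and its TS_POLLING form (tile and x86 below). Presumably the per-arch copies are replaced by a single fallback in the core scheduler headers, roughly as follows (an assumption, since the core-side hunk is outside the arch/ diffstat shown here):

    /* in core scheduler code (sketch): */
    #ifndef tsk_is_polling
    # define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
    #endif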
diff --git a/arch/metag/kernel/process.c b/arch/metag/kernel/process.c index c6efe62e5b76..dc5923544560 100644 --- a/arch/metag/kernel/process.c +++ b/arch/metag/kernel/process.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/pm.h> | 22 | #include <linux/pm.h> |
23 | #include <linux/syscalls.h> | 23 | #include <linux/syscalls.h> |
24 | #include <linux/uaccess.h> | 24 | #include <linux/uaccess.h> |
25 | #include <linux/smp.h> | ||
25 | #include <asm/core_reg.h> | 26 | #include <asm/core_reg.h> |
26 | #include <asm/user_gateway.h> | 27 | #include <asm/user_gateway.h> |
27 | #include <asm/tcm.h> | 28 | #include <asm/tcm.h> |
@@ -31,7 +32,7 @@ | |||
31 | /* | 32 | /* |
32 | * Wait for the next interrupt and enable local interrupts | 33 | * Wait for the next interrupt and enable local interrupts |
33 | */ | 34 | */ |
34 | static inline void arch_idle(void) | 35 | void arch_cpu_idle(void) |
35 | { | 36 | { |
36 | int tmp; | 37 | int tmp; |
37 | 38 | ||
@@ -59,36 +60,12 @@ static inline void arch_idle(void) | |||
59 | : "r" (get_trigger_mask())); | 60 | : "r" (get_trigger_mask())); |
60 | } | 61 | } |
61 | 62 | ||
62 | void cpu_idle(void) | ||
63 | { | ||
64 | set_thread_flag(TIF_POLLING_NRFLAG); | ||
65 | |||
66 | while (1) { | ||
67 | tick_nohz_idle_enter(); | ||
68 | rcu_idle_enter(); | ||
69 | |||
70 | while (!need_resched()) { | ||
71 | /* | ||
72 | * We need to disable interrupts here to ensure we don't | ||
73 | * miss a wakeup call. | ||
74 | */ | ||
75 | local_irq_disable(); | ||
76 | if (!need_resched()) { | ||
77 | #ifdef CONFIG_HOTPLUG_CPU | 63 | #ifdef CONFIG_HOTPLUG_CPU |
78 | if (cpu_is_offline(smp_processor_id())) | 64 | void arch_cpu_idle_dead(void) |
79 | cpu_die(); | 65 | { |
80 | #endif | 66 | cpu_die(); |
81 | arch_idle(); | ||
82 | } else { | ||
83 | local_irq_enable(); | ||
84 | } | ||
85 | } | ||
86 | |||
87 | rcu_idle_exit(); | ||
88 | tick_nohz_idle_exit(); | ||
89 | schedule_preempt_disabled(); | ||
90 | } | ||
91 | } | 67 | } |
68 | #endif | ||
92 | 69 | ||
93 | void (*pm_power_off)(void); | 70 | void (*pm_power_off)(void); |
94 | EXPORT_SYMBOL(pm_power_off); | 71 | EXPORT_SYMBOL(pm_power_off); |
diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c index 4b6d1f14df32..4de8fc8e31a5 100644 --- a/arch/metag/kernel/smp.c +++ b/arch/metag/kernel/smp.c | |||
@@ -297,7 +297,7 @@ asmlinkage void secondary_start_kernel(void) | |||
297 | /* | 297 | /* |
298 | * OK, it's off to the idle thread for us | 298 | * OK, it's off to the idle thread for us |
299 | */ | 299 | */ |
300 | cpu_idle(); | 300 | cpu_startup_entry(CPUHP_ONLINE); |
301 | } | 301 | } |
302 | 302 | ||
303 | void __init smp_cpus_done(unsigned int max_cpus) | 303 | void __init smp_cpus_done(unsigned int max_cpus) |
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 1323fa2530eb..a827057c7927 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig | |||
@@ -26,6 +26,7 @@ config MICROBLAZE | |||
26 | select GENERIC_CPU_DEVICES | 26 | select GENERIC_CPU_DEVICES |
27 | select GENERIC_ATOMIC64 | 27 | select GENERIC_ATOMIC64 |
28 | select GENERIC_CLOCKEVENTS | 28 | select GENERIC_CLOCKEVENTS |
29 | select GENERIC_IDLE_POLL_SETUP | ||
29 | select MODULES_USE_ELF_RELA | 30 | select MODULES_USE_ELF_RELA |
30 | select CLONE_BACKWARDS | 31 | select CLONE_BACKWARDS |
31 | 32 | ||
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h index 0759153e8117..d6e0ffea28b6 100644 --- a/arch/microblaze/include/asm/processor.h +++ b/arch/microblaze/include/asm/processor.h | |||
@@ -22,7 +22,6 @@ | |||
22 | extern const struct seq_operations cpuinfo_op; | 22 | extern const struct seq_operations cpuinfo_op; |
23 | 23 | ||
24 | # define cpu_relax() barrier() | 24 | # define cpu_relax() barrier() |
25 | # define cpu_sleep() do {} while (0) | ||
26 | 25 | ||
27 | #define task_pt_regs(tsk) \ | 26 | #define task_pt_regs(tsk) \ |
28 | (((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1) | 27 | (((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1) |
@@ -160,10 +159,6 @@ unsigned long get_wchan(struct task_struct *p); | |||
160 | # define STACK_TOP TASK_SIZE | 159 | # define STACK_TOP TASK_SIZE |
161 | # define STACK_TOP_MAX STACK_TOP | 160 | # define STACK_TOP_MAX STACK_TOP |
162 | 161 | ||
163 | void disable_hlt(void); | ||
164 | void enable_hlt(void); | ||
165 | void default_idle(void); | ||
166 | |||
167 | #ifdef CONFIG_DEBUG_FS | 162 | #ifdef CONFIG_DEBUG_FS |
168 | extern struct dentry *of_debugfs_root; | 163 | extern struct dentry *of_debugfs_root; |
169 | #endif | 164 | #endif |
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h index 008f30433d22..de26ea6373de 100644 --- a/arch/microblaze/include/asm/thread_info.h +++ b/arch/microblaze/include/asm/thread_info.h | |||
@@ -182,7 +182,6 @@ static inline bool test_and_clear_restore_sigmask(void) | |||
182 | ti->status &= ~TS_RESTORE_SIGMASK; | 182 | ti->status &= ~TS_RESTORE_SIGMASK; |
183 | return true; | 183 | return true; |
184 | } | 184 | } |
185 | #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) | ||
186 | #endif | 185 | #endif |
187 | 186 | ||
188 | #endif /* __KERNEL__ */ | 187 | #endif /* __KERNEL__ */ |
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c index fa0ea609137c..7cce2e9c1719 100644 --- a/arch/microblaze/kernel/process.c +++ b/arch/microblaze/kernel/process.c | |||
@@ -44,71 +44,6 @@ void show_regs(struct pt_regs *regs) | |||
44 | void (*pm_power_off)(void) = NULL; | 44 | void (*pm_power_off)(void) = NULL; |
45 | EXPORT_SYMBOL(pm_power_off); | 45 | EXPORT_SYMBOL(pm_power_off); |
46 | 46 | ||
47 | static int hlt_counter = 1; | ||
48 | |||
49 | void disable_hlt(void) | ||
50 | { | ||
51 | hlt_counter++; | ||
52 | } | ||
53 | EXPORT_SYMBOL(disable_hlt); | ||
54 | |||
55 | void enable_hlt(void) | ||
56 | { | ||
57 | hlt_counter--; | ||
58 | } | ||
59 | EXPORT_SYMBOL(enable_hlt); | ||
60 | |||
61 | static int __init nohlt_setup(char *__unused) | ||
62 | { | ||
63 | hlt_counter = 1; | ||
64 | return 1; | ||
65 | } | ||
66 | __setup("nohlt", nohlt_setup); | ||
67 | |||
68 | static int __init hlt_setup(char *__unused) | ||
69 | { | ||
70 | hlt_counter = 0; | ||
71 | return 1; | ||
72 | } | ||
73 | __setup("hlt", hlt_setup); | ||
74 | |||
75 | void default_idle(void) | ||
76 | { | ||
77 | if (likely(hlt_counter)) { | ||
78 | local_irq_disable(); | ||
79 | stop_critical_timings(); | ||
80 | cpu_relax(); | ||
81 | start_critical_timings(); | ||
82 | local_irq_enable(); | ||
83 | } else { | ||
84 | clear_thread_flag(TIF_POLLING_NRFLAG); | ||
85 | smp_mb__after_clear_bit(); | ||
86 | local_irq_disable(); | ||
87 | while (!need_resched()) | ||
88 | cpu_sleep(); | ||
89 | local_irq_enable(); | ||
90 | set_thread_flag(TIF_POLLING_NRFLAG); | ||
91 | } | ||
92 | } | ||
93 | |||
94 | void cpu_idle(void) | ||
95 | { | ||
96 | set_thread_flag(TIF_POLLING_NRFLAG); | ||
97 | |||
98 | /* endless idle loop with no priority at all */ | ||
99 | while (1) { | ||
100 | tick_nohz_idle_enter(); | ||
101 | rcu_idle_enter(); | ||
102 | while (!need_resched()) | ||
103 | default_idle(); | ||
104 | rcu_idle_exit(); | ||
105 | tick_nohz_idle_exit(); | ||
106 | |||
107 | schedule_preempt_disabled(); | ||
108 | check_pgt_cache(); | ||
109 | } | ||
110 | } | ||
111 | |||
112 | void flush_thread(void) | 47 | void flush_thread(void) |
113 | { | 48 | { |
114 | } | 49 | } |
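microblaze's hand-rolled hlt_counter, "hlt"/"nohlt" boot parameters and default_idle() all collapse into the GENERIC_IDLE_POLL_SETUP option selected in the Kconfig hunk above. The core presumably carries the one remaining copy of that machinery, roughly as sketched here (the names are taken from this diff — cpu_idle_poll_ctrl() is called by tile further down — the bodies are approximated):

    static int cpu_idle_force_poll;

    void cpu_idle_poll_ctrl(bool enable)
    {
            if (enable) {
                    cpu_idle_force_poll++;
            } else {
                    cpu_idle_force_poll--;
                    WARN_ON_ONCE(cpu_idle_force_poll < 0);
            }
    }

    #ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
    static int __init cpu_idle_poll_setup(char *__unused)
    {
            cpu_idle_force_poll = 1;        /* "nohlt": spin on need_resched() */
            return 1;
    }
    __setup("nohlt", cpu_idle_poll_setup);

    static int __init cpu_idle_nopoll_setup(char *__unused)
    {
            cpu_idle_force_poll = 0;        /* "hlt": allow the arch sleep routine */
            return 1;
    }
    __setup("hlt", cpu_idle_nopoll_setup);
    #endif

    /* the polling loop itself (sketch): */
    static void cpu_idle_poll(void)
    {
            rcu_idle_enter();
            local_irq_enable();
            while (!need_resched())
                    cpu_relax();
            rcu_idle_exit();
    }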
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 3be4405c2d14..cfc742d75b7f 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c | |||
@@ -41,44 +41,26 @@ | |||
41 | #include <asm/inst.h> | 41 | #include <asm/inst.h> |
42 | #include <asm/stacktrace.h> | 42 | #include <asm/stacktrace.h> |
43 | 43 | ||
44 | /* | 44 | #ifdef CONFIG_HOTPLUG_CPU |
45 | * The idle thread. There's no useful work to be done, so just try to conserve | 45 | void arch_cpu_idle_dead(void) |
46 | * power and have a low exit latency (ie sit in a loop waiting for somebody to | ||
47 | * say that they'd like to reschedule) | ||
48 | */ | ||
49 | void __noreturn cpu_idle(void) | ||
50 | { | 46 | { |
51 | int cpu; | 47 | /* What the heck is this check doing ? */ |
52 | 48 | if (!cpu_isset(smp_processor_id(), cpu_callin_map)) | |
53 | /* CPU is going idle. */ | 49 | play_dead(); |
54 | cpu = smp_processor_id(); | 50 | } |
51 | #endif | ||
55 | 52 | ||
56 | /* endless idle loop with no priority at all */ | 53 | void arch_cpu_idle(void) |
57 | while (1) { | 54 | { |
58 | tick_nohz_idle_enter(); | ||
59 | rcu_idle_enter(); | ||
60 | while (!need_resched() && cpu_online(cpu)) { | ||
61 | #ifdef CONFIG_MIPS_MT_SMTC | 55 | #ifdef CONFIG_MIPS_MT_SMTC |
62 | extern void smtc_idle_loop_hook(void); | 56 | extern void smtc_idle_loop_hook(void); |
63 | 57 | ||
64 | smtc_idle_loop_hook(); | 58 | smtc_idle_loop_hook(); |
65 | #endif | 59 | #endif |
66 | 60 | if (cpu_wait) | |
67 | if (cpu_wait) { | 61 | (*cpu_wait)(); |
68 | /* Don't trace irqs off for idle */ | 62 | else |
69 | stop_critical_timings(); | 63 | local_irq_enable(); |
70 | (*cpu_wait)(); | ||
71 | start_critical_timings(); | ||
72 | } | ||
73 | } | ||
74 | #ifdef CONFIG_HOTPLUG_CPU | ||
75 | if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map)) | ||
76 | play_dead(); | ||
77 | #endif | ||
78 | rcu_idle_exit(); | ||
79 | tick_nohz_idle_exit(); | ||
80 | schedule_preempt_disabled(); | ||
81 | } | ||
82 | } | 64 | } |
83 | 65 | ||
84 | asmlinkage void ret_from_fork(void); | 66 | asmlinkage void ret_from_fork(void); |
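mips makes the calling convention explicit: arch_cpu_idle() is entered with interrupts disabled and must return with them enabled, either as a side effect of the wait instruction (the cpu_wait branch) or by hand (the else branch). The sanity check the old sh loop performed manually — WARN_ON(irqs_disabled()) after the idle call, deleted further down — presumably moves into the core as something like:

    /* Core-side contract check (sketch): */
    rcu_idle_enter();
    arch_cpu_idle();
    WARN_ON_ONCE(irqs_disabled());  /* idle routine must come back with IRQs on */
    rcu_idle_exit();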
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 66bf4e22d9b9..aee04af213c5 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c | |||
@@ -139,7 +139,7 @@ asmlinkage __cpuinit void start_secondary(void) | |||
139 | WARN_ON_ONCE(!irqs_disabled()); | 139 | WARN_ON_ONCE(!irqs_disabled()); |
140 | mp_ops->smp_finish(); | 140 | mp_ops->smp_finish(); |
141 | 141 | ||
142 | cpu_idle(); | 142 | cpu_startup_entry(CPUHP_ONLINE); |
143 | } | 143 | } |
144 | 144 | ||
145 | /* | 145 | /* |
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h index f90062b0622d..224b4262486d 100644 --- a/arch/mn10300/include/asm/thread_info.h +++ b/arch/mn10300/include/asm/thread_info.h | |||
@@ -165,8 +165,6 @@ void arch_release_thread_info(struct thread_info *ti); | |||
165 | #define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */ | 165 | #define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */ |
166 | #define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */ | 166 | #define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */ |
167 | 167 | ||
168 | #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) | ||
169 | |||
170 | #endif /* __KERNEL__ */ | 168 | #endif /* __KERNEL__ */ |
171 | 169 | ||
172 | #endif /* _ASM_THREAD_INFO_H */ | 170 | #endif /* _ASM_THREAD_INFO_H */ |
diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c index 84f4e97e3074..2da39fb8b3b2 100644 --- a/arch/mn10300/kernel/process.c +++ b/arch/mn10300/kernel/process.c | |||
@@ -50,77 +50,19 @@ unsigned long thread_saved_pc(struct task_struct *tsk) | |||
50 | void (*pm_power_off)(void); | 50 | void (*pm_power_off)(void); |
51 | EXPORT_SYMBOL(pm_power_off); | 51 | EXPORT_SYMBOL(pm_power_off); |
52 | 52 | ||
53 | #if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU) | ||
54 | /* | ||
55 | * we use this if we don't have any better idle routine | ||
56 | */ | ||
57 | static void default_idle(void) | ||
58 | { | ||
59 | local_irq_disable(); | ||
60 | if (!need_resched()) | ||
61 | safe_halt(); | ||
62 | else | ||
63 | local_irq_enable(); | ||
64 | } | ||
65 | |||
66 | #else /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */ | ||
67 | /* | 53 | /* |
68 | * On SMP it's slightly faster (but much more power-consuming!) | 54 | * On SMP it's slightly faster (but much more power-consuming!) |
69 | * to poll the ->work.need_resched flag instead of waiting for the | 55 | * to poll the ->work.need_resched flag instead of waiting for the |
70 | * cross-CPU IPI to arrive. Use this option with caution. | 56 | * cross-CPU IPI to arrive. Use this option with caution. |
57 | * | ||
58 | * tglx: No idea why this depends on HOTPLUG_CPU !?! | ||
71 | */ | 59 | */ |
72 | static inline void poll_idle(void) | 60 | #if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU) |
73 | { | 61 | void arch_cpu_idle(void) |
74 | int oldval; | ||
75 | |||
76 | local_irq_enable(); | ||
77 | |||
78 | /* | ||
79 | * Deal with another CPU just having chosen a thread to | ||
80 | * run here: | ||
81 | */ | ||
82 | oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED); | ||
83 | |||
84 | if (!oldval) { | ||
85 | set_thread_flag(TIF_POLLING_NRFLAG); | ||
86 | while (!need_resched()) | ||
87 | cpu_relax(); | ||
88 | clear_thread_flag(TIF_POLLING_NRFLAG); | ||
89 | } else { | ||
90 | set_need_resched(); | ||
91 | } | ||
92 | } | ||
93 | #endif /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */ | ||
94 | |||
95 | /* | ||
96 | * the idle thread | ||
97 | * - there's no useful work to be done, so just try to conserve power and have | ||
98 | * a low exit latency (ie sit in a loop waiting for somebody to say that | ||
99 | * they'd like to reschedule) | ||
100 | */ | ||
101 | void cpu_idle(void) | ||
102 | { | 62 | { |
103 | /* endless idle loop with no priority at all */ | 63 | safe_halt(); |
104 | for (;;) { | ||
105 | rcu_idle_enter(); | ||
106 | while (!need_resched()) { | ||
107 | void (*idle)(void); | ||
108 | |||
109 | smp_rmb(); | ||
110 | if (!idle) { | ||
111 | #if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU) | ||
112 | idle = poll_idle; | ||
113 | #else /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */ | ||
114 | idle = default_idle; | ||
115 | #endif /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */ | ||
116 | } | ||
117 | idle(); | ||
118 | } | ||
119 | rcu_idle_exit(); | ||
120 | |||
121 | schedule_preempt_disabled(); | ||
122 | } | ||
123 | } | 64 | } |
65 | #endif | ||
124 | 66 | ||
125 | void release_segments(struct mm_struct *mm) | 67 | void release_segments(struct mm_struct *mm) |
126 | { | 68 | { |
diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c index 5d7e152a23b7..a17f9c9c14c9 100644 --- a/arch/mn10300/kernel/smp.c +++ b/arch/mn10300/kernel/smp.c | |||
@@ -675,7 +675,7 @@ int __init start_secondary(void *unused) | |||
675 | #ifdef CONFIG_GENERIC_CLOCKEVENTS | 675 | #ifdef CONFIG_GENERIC_CLOCKEVENTS |
676 | init_clockevents(); | 676 | init_clockevents(); |
677 | #endif | 677 | #endif |
678 | cpu_idle(); | 678 | cpu_startup_entry(CPUHP_ONLINE); |
679 | return 0; | 679 | return 0; |
680 | } | 680 | } |
681 | 681 | ||
@@ -935,8 +935,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) | |||
935 | int timeout; | 935 | int timeout; |
936 | 936 | ||
937 | #ifdef CONFIG_HOTPLUG_CPU | 937 | #ifdef CONFIG_HOTPLUG_CPU |
938 | if (num_online_cpus() == 1) | ||
939 | disable_hlt(); | ||
940 | if (sleep_mode[cpu]) | 938 | if (sleep_mode[cpu]) |
941 | run_wakeup_cpu(cpu); | 939 | run_wakeup_cpu(cpu); |
942 | #endif /* CONFIG_HOTPLUG_CPU */ | 940 | #endif /* CONFIG_HOTPLUG_CPU */ |
@@ -1003,9 +1001,6 @@ int __cpu_disable(void) | |||
1003 | void __cpu_die(unsigned int cpu) | 1001 | void __cpu_die(unsigned int cpu) |
1004 | { | 1002 | { |
1005 | run_sleep_cpu(cpu); | 1003 | run_sleep_cpu(cpu); |
1006 | |||
1007 | if (num_online_cpus() == 1) | ||
1008 | enable_hlt(); | ||
1009 | } | 1004 | } |
1010 | 1005 | ||
1011 | #ifdef CONFIG_MN10300_CACHE_ENABLED | 1006 | #ifdef CONFIG_MN10300_CACHE_ENABLED |
diff --git a/arch/openrisc/include/asm/thread_info.h b/arch/openrisc/include/asm/thread_info.h index 07f3212422ad..d797acc901e4 100644 --- a/arch/openrisc/include/asm/thread_info.h +++ b/arch/openrisc/include/asm/thread_info.h | |||
@@ -128,8 +128,6 @@ register struct thread_info *current_thread_info_reg asm("r10"); | |||
128 | /* For OpenRISC, this is anything in the LSW other than syscall trace */ | 128 | /* For OpenRISC, this is anything in the LSW other than syscall trace */ |
129 | #define _TIF_WORK_MASK (0xff & ~(_TIF_SYSCALL_TRACE|_TIF_SINGLESTEP)) | 129 | #define _TIF_WORK_MASK (0xff & ~(_TIF_SYSCALL_TRACE|_TIF_SINGLESTEP)) |
130 | 130 | ||
131 | #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) | ||
132 | |||
133 | #endif /* __KERNEL__ */ | 131 | #endif /* __KERNEL__ */ |
134 | 132 | ||
135 | #endif /* _ASM_THREAD_INFO_H */ | 133 | #endif /* _ASM_THREAD_INFO_H */ |
diff --git a/arch/openrisc/kernel/Makefile b/arch/openrisc/kernel/Makefile index 35f92ce51c24..ec6d9d37cefd 100644 --- a/arch/openrisc/kernel/Makefile +++ b/arch/openrisc/kernel/Makefile | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | extra-y := head.o vmlinux.lds | 5 | extra-y := head.o vmlinux.lds |
6 | 6 | ||
7 | obj-y := setup.o idle.o or32_ksyms.o process.o dma.o \ | 7 | obj-y := setup.o or32_ksyms.o process.o dma.o \ |
8 | traps.o time.o irq.o entry.o ptrace.o signal.o \ | 8 | traps.o time.o irq.o entry.o ptrace.o signal.o \ |
9 | sys_call_table.o | 9 | sys_call_table.o |
10 | 10 | ||
diff --git a/arch/openrisc/kernel/idle.c b/arch/openrisc/kernel/idle.c deleted file mode 100644 index 5e8a3b6d6bc6..000000000000 --- a/arch/openrisc/kernel/idle.c +++ /dev/null | |||
@@ -1,73 +0,0 @@ | |||
1 | /* | ||
2 | * OpenRISC idle.c | ||
3 | * | ||
4 | * Linux architectural port borrowing liberally from similar works of | ||
5 | * others. All original copyrights apply as per the original source | ||
6 | * declaration. | ||
7 | * | ||
8 | * Modifications for the OpenRISC architecture: | ||
9 | * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> | ||
10 | * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version | ||
15 | * 2 of the License, or (at your option) any later version. | ||
16 | * | ||
17 | * Idle daemon for or32. Idle daemon will handle any action | ||
18 | * that needs to be taken when the system becomes idle. | ||
19 | */ | ||
20 | |||
21 | #include <linux/errno.h> | ||
22 | #include <linux/sched.h> | ||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/mm.h> | ||
25 | #include <linux/smp.h> | ||
26 | #include <linux/stddef.h> | ||
27 | #include <linux/unistd.h> | ||
28 | #include <linux/ptrace.h> | ||
29 | #include <linux/slab.h> | ||
30 | #include <linux/tick.h> | ||
31 | |||
32 | #include <asm/pgtable.h> | ||
33 | #include <asm/uaccess.h> | ||
34 | #include <asm/io.h> | ||
35 | #include <asm/processor.h> | ||
36 | #include <asm/mmu.h> | ||
37 | #include <asm/cache.h> | ||
38 | #include <asm/pgalloc.h> | ||
39 | |||
40 | void (*powersave) (void) = NULL; | ||
41 | |||
42 | void cpu_idle(void) | ||
43 | { | ||
44 | set_thread_flag(TIF_POLLING_NRFLAG); | ||
45 | |||
46 | /* endless idle loop with no priority at all */ | ||
47 | while (1) { | ||
48 | tick_nohz_idle_enter(); | ||
49 | rcu_idle_enter(); | ||
50 | |||
51 | while (!need_resched()) { | ||
52 | check_pgt_cache(); | ||
53 | rmb(); | ||
54 | |||
55 | clear_thread_flag(TIF_POLLING_NRFLAG); | ||
56 | |||
57 | local_irq_disable(); | ||
58 | /* Don't trace irqs off for idle */ | ||
59 | stop_critical_timings(); | ||
60 | if (!need_resched() && powersave != NULL) | ||
61 | powersave(); | ||
62 | start_critical_timings(); | ||
63 | local_irq_enable(); | ||
64 | set_thread_flag(TIF_POLLING_NRFLAG); | ||
65 | } | ||
66 | |||
67 | rcu_idle_exit(); | ||
68 | tick_nohz_idle_exit(); | ||
69 | preempt_enable_no_resched(); | ||
70 | schedule(); | ||
71 | preempt_disable(); | ||
72 | } | ||
73 | } | ||
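The whole of openrisc's idle.c goes away, including the powersave function pointer. The pointer itself presumably survives, relocated into openrisc's process.c as a small arch_cpu_idle() — that hunk is not shown in this excerpt, so the following is an assumption about where it lands:

    /* Presumed new home in arch/openrisc/kernel/process.c (sketch): */
    void (*powersave)(void) = NULL;

    void arch_cpu_idle(void)
    {
            local_irq_enable();
            if (powersave != NULL)
                    powersave();
    }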
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h index d1fb79a36f3d..6182832e5b6c 100644 --- a/arch/parisc/include/asm/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h | |||
@@ -77,8 +77,6 @@ struct thread_info { | |||
77 | #define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \ | 77 | #define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \ |
78 | _TIF_BLOCKSTEP) | 78 | _TIF_BLOCKSTEP) |
79 | 79 | ||
80 | #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) | ||
81 | |||
82 | #endif /* __KERNEL__ */ | 80 | #endif /* __KERNEL__ */ |
83 | 81 | ||
84 | #endif /* _ASM_PARISC_THREAD_INFO_H */ | 82 | #endif /* _ASM_PARISC_THREAD_INFO_H */ |
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index d13507246c5d..55f92b614182 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c | |||
@@ -59,28 +59,6 @@ | |||
59 | #include <asm/unwind.h> | 59 | #include <asm/unwind.h> |
60 | #include <asm/sections.h> | 60 | #include <asm/sections.h> |
61 | 61 | ||
62 | /* | ||
63 | * The idle thread. There's no useful work to be | ||
64 | * done, so just try to conserve power and have a | ||
65 | * low exit latency (ie sit in a loop waiting for | ||
66 | * somebody to say that they'd like to reschedule) | ||
67 | */ | ||
68 | void cpu_idle(void) | ||
69 | { | ||
70 | set_thread_flag(TIF_POLLING_NRFLAG); | ||
71 | |||
72 | /* endless idle loop with no priority at all */ | ||
73 | while (1) { | ||
74 | rcu_idle_enter(); | ||
75 | while (!need_resched()) | ||
76 | barrier(); | ||
77 | rcu_idle_exit(); | ||
78 | schedule_preempt_disabled(); | ||
79 | check_pgt_cache(); | ||
80 | } | ||
81 | } | ||
82 | |||
83 | |||
84 | #define COMMAND_GLOBAL F_EXTEND(0xfffe0030) | 62 | #define COMMAND_GLOBAL F_EXTEND(0xfffe0030) |
85 | #define CMD_RESET 5 /* reset any module */ | 63 | #define CMD_RESET 5 /* reset any module */ |
86 | 64 | ||
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 6266730efd61..fd1bb1519c2b 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c | |||
@@ -329,7 +329,7 @@ void __init smp_callin(void) | |||
329 | 329 | ||
330 | local_irq_enable(); /* Interrupts have been off until now */ | 330 | local_irq_enable(); /* Interrupts have been off until now */ |
331 | 331 | ||
332 | cpu_idle(); /* Wait for timer to schedule some work */ | 332 | cpu_startup_entry(CPUHP_ONLINE); |
333 | 333 | ||
334 | /* NOTREACHED */ | 334 | /* NOTREACHED */ |
335 | panic("smp_callin() AAAAaaaaahhhh....\n"); | 335 | panic("smp_callin() AAAAaaaaahhhh....\n"); |
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 406b7b9a1341..8ceea14d6fe4 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h | |||
@@ -182,8 +182,6 @@ static inline bool test_thread_local_flags(unsigned int flags) | |||
182 | #define is_32bit_task() (1) | 182 | #define is_32bit_task() (1) |
183 | #endif | 183 | #endif |
184 | 184 | ||
185 | #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) | ||
186 | |||
187 | #endif /* !__ASSEMBLY__ */ | 185 | #endif /* !__ASSEMBLY__ */ |
188 | 186 | ||
189 | #endif /* __KERNEL__ */ | 187 | #endif /* __KERNEL__ */ |
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index ea78761aa169..939ea7ef0dc8 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c | |||
@@ -33,11 +33,6 @@ | |||
33 | #include <asm/runlatch.h> | 33 | #include <asm/runlatch.h> |
34 | #include <asm/smp.h> | 34 | #include <asm/smp.h> |
35 | 35 | ||
36 | #ifdef CONFIG_HOTPLUG_CPU | ||
37 | #define cpu_should_die() cpu_is_offline(smp_processor_id()) | ||
38 | #else | ||
39 | #define cpu_should_die() 0 | ||
40 | #endif | ||
41 | 36 | ||
42 | unsigned long cpuidle_disable = IDLE_NO_OVERRIDE; | 37 | unsigned long cpuidle_disable = IDLE_NO_OVERRIDE; |
43 | EXPORT_SYMBOL(cpuidle_disable); | 38 | EXPORT_SYMBOL(cpuidle_disable); |
@@ -50,64 +45,38 @@ static int __init powersave_off(char *arg) | |||
50 | } | 45 | } |
51 | __setup("powersave=off", powersave_off); | 46 | __setup("powersave=off", powersave_off); |
52 | 47 | ||
53 | /* | 48 | #ifdef CONFIG_HOTPLUG_CPU |
54 | * The body of the idle task. | 49 | void arch_cpu_idle_dead(void) |
55 | */ | ||
56 | void cpu_idle(void) | ||
57 | { | 50 | { |
58 | set_thread_flag(TIF_POLLING_NRFLAG); | 51 | sched_preempt_enable_no_resched(); |
59 | while (1) { | 52 | cpu_die(); |
60 | tick_nohz_idle_enter(); | 53 | } |
61 | rcu_idle_enter(); | 54 | #endif |
62 | |||
63 | while (!need_resched() && !cpu_should_die()) { | ||
64 | ppc64_runlatch_off(); | ||
65 | |||
66 | if (ppc_md.power_save) { | ||
67 | clear_thread_flag(TIF_POLLING_NRFLAG); | ||
68 | /* | ||
69 | * smp_mb is so clearing of TIF_POLLING_NRFLAG | ||
70 | * is ordered w.r.t. need_resched() test. | ||
71 | */ | ||
72 | smp_mb(); | ||
73 | local_irq_disable(); | ||
74 | |||
75 | /* Don't trace irqs off for idle */ | ||
76 | stop_critical_timings(); | ||
77 | |||
78 | /* check again after disabling irqs */ | ||
79 | if (!need_resched() && !cpu_should_die()) | ||
80 | ppc_md.power_save(); | ||
81 | |||
82 | start_critical_timings(); | ||
83 | |||
84 | /* Some power_save functions return with | ||
85 | * interrupts enabled, some don't. | ||
86 | */ | ||
87 | if (irqs_disabled()) | ||
88 | local_irq_enable(); | ||
89 | set_thread_flag(TIF_POLLING_NRFLAG); | ||
90 | |||
91 | } else { | ||
92 | /* | ||
93 | * Go into low thread priority and possibly | ||
94 | * low power mode. | ||
95 | */ | ||
96 | HMT_low(); | ||
97 | HMT_very_low(); | ||
98 | } | ||
99 | } | ||
100 | 55 | ||
101 | HMT_medium(); | 56 | void arch_cpu_idle(void) |
102 | ppc64_runlatch_on(); | 57 | { |
103 | rcu_idle_exit(); | 58 | ppc64_runlatch_off(); |
104 | tick_nohz_idle_exit(); | 59 | |
105 | if (cpu_should_die()) { | 60 | if (ppc_md.power_save) { |
106 | sched_preempt_enable_no_resched(); | 61 | ppc_md.power_save(); |
107 | cpu_die(); | 62 | /* |
108 | } | 63 | * Some power_save functions return with |
109 | schedule_preempt_disabled(); | 64 | * interrupts enabled, some don't. |
65 | */ | ||
66 | if (irqs_disabled()) | ||
67 | local_irq_enable(); | ||
68 | } else { | ||
69 | local_irq_enable(); | ||
70 | /* | ||
71 | * Go into low thread priority and possibly | ||
72 | * low power mode. | ||
73 | */ | ||
74 | HMT_low(); | ||
75 | HMT_very_low(); | ||
110 | } | 76 | } |
77 | |||
78 | HMT_medium(); | ||
79 | ppc64_runlatch_on(); | ||
111 | } | 80 | } |
112 | 81 | ||
113 | int powersave_nap; | 82 | int powersave_nap; |
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 76bd9da8cb71..ee7ac5e6e28a 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c | |||
@@ -669,7 +669,7 @@ __cpuinit void start_secondary(void *unused) | |||
669 | 669 | ||
670 | local_irq_enable(); | 670 | local_irq_enable(); |
671 | 671 | ||
672 | cpu_idle(); | 672 | cpu_startup_entry(CPUHP_ONLINE); |
673 | 673 | ||
674 | BUG(); | 674 | BUG(); |
675 | } | 675 | } |
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 536d64579d9a..2bc3eddae34a 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c | |||
@@ -61,18 +61,8 @@ unsigned long thread_saved_pc(struct task_struct *tsk) | |||
61 | return sf->gprs[8]; | 61 | return sf->gprs[8]; |
62 | } | 62 | } |
63 | 63 | ||
64 | /* | 64 | void arch_cpu_idle(void) |
65 | * The idle loop on a S390... | ||
66 | */ | ||
67 | static void default_idle(void) | ||
68 | { | 65 | { |
69 | if (cpu_is_offline(smp_processor_id())) | ||
70 | cpu_die(); | ||
71 | local_irq_disable(); | ||
72 | if (need_resched()) { | ||
73 | local_irq_enable(); | ||
74 | return; | ||
75 | } | ||
76 | local_mcck_disable(); | 66 | local_mcck_disable(); |
77 | if (test_thread_flag(TIF_MCCK_PENDING)) { | 67 | if (test_thread_flag(TIF_MCCK_PENDING)) { |
78 | local_mcck_enable(); | 68 | local_mcck_enable(); |
@@ -83,19 +73,15 @@ static void default_idle(void) | |||
83 | vtime_stop_cpu(); | 73 | vtime_stop_cpu(); |
84 | } | 74 | } |
85 | 75 | ||
86 | void cpu_idle(void) | 76 | void arch_cpu_idle_exit(void) |
87 | { | 77 | { |
88 | for (;;) { | 78 | if (test_thread_flag(TIF_MCCK_PENDING)) |
89 | tick_nohz_idle_enter(); | 79 | s390_handle_mcck(); |
90 | rcu_idle_enter(); | 80 | } |
91 | while (!need_resched() && !test_thread_flag(TIF_MCCK_PENDING)) | 81 | |
92 | default_idle(); | 82 | void arch_cpu_idle_dead(void) |
93 | rcu_idle_exit(); | 83 | { |
94 | tick_nohz_idle_exit(); | 84 | cpu_die(); |
95 | if (test_thread_flag(TIF_MCCK_PENDING)) | ||
96 | s390_handle_mcck(); | ||
97 | schedule_preempt_disabled(); | ||
98 | } | ||
99 | } | 85 | } |
100 | 86 | ||
101 | extern void __kprobes kernel_thread_starter(void); | 87 | extern void __kprobes kernel_thread_starter(void); |
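s390 is the first architecture in this diff to use nearly the full hook surface. Collected from the hunks, the per-arch entry points the generic loop offers are:

    void arch_cpu_idle_prepare(void);   /* once, before entering the loop
                                         * (x86: stack canary, below) */
    void arch_cpu_idle_enter(void);     /* each iteration, IRQs off
                                         * (x86: enter_idle(), below) */
    void arch_cpu_idle(void);           /* the actual sleep; must re-enable
                                         * IRQs before returning */
    void arch_cpu_idle_exit(void);      /* after wakeup (s390: pending
                                         * machine-check handling above) */
    void arch_cpu_idle_dead(void);      /* when the CPU is offline
                                         * (s390: cpu_die() above) */

Any hook an architecture leaves undefined is presumably a weak no-op in the core.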
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 8bde89eafd88..8074cb4b7cbf 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -714,8 +714,7 @@ static void __cpuinit smp_start_secondary(void *cpuvoid) | |||
714 | set_cpu_online(smp_processor_id(), true); | 714 | set_cpu_online(smp_processor_id(), true); |
715 | inc_irq_stat(CPU_RST); | 715 | inc_irq_stat(CPU_RST); |
716 | local_irq_enable(); | 716 | local_irq_enable(); |
717 | /* cpu_idle will call schedule for us */ | 717 | cpu_startup_entry(CPUHP_ONLINE); |
718 | cpu_idle(); | ||
719 | } | 718 | } |
720 | 719 | ||
721 | /* Upping and downing of CPUs */ | 720 | /* Upping and downing of CPUs */ |
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index a0042acbd989..3fb09359eda6 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c | |||
@@ -158,8 +158,6 @@ void __kprobes vtime_stop_cpu(void) | |||
158 | unsigned long psw_mask; | 158 | unsigned long psw_mask; |
159 | 159 | ||
160 | trace_hardirqs_on(); | 160 | trace_hardirqs_on(); |
161 | /* Don't trace preempt off for idle. */ | ||
162 | stop_critical_timings(); | ||
163 | 161 | ||
164 | /* Wait for external, I/O or machine check interrupt. */ | 162 | /* Wait for external, I/O or machine check interrupt. */ |
165 | psw_mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_DAT | | 163 | psw_mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_DAT | |
@@ -169,9 +167,6 @@ void __kprobes vtime_stop_cpu(void) | |||
169 | /* Call the assembler magic in entry.S */ | 167 | /* Call the assembler magic in entry.S */ |
170 | psw_idle(idle, psw_mask); | 168 | psw_idle(idle, psw_mask); |
171 | 169 | ||
172 | /* Reenable preemption tracer. */ | ||
173 | start_critical_timings(); | ||
174 | |||
175 | /* Account time spent with enabled wait psw loaded as idle time. */ | 170 | /* Account time spent with enabled wait psw loaded as idle time. */ |
176 | idle->sequence++; | 171 | idle->sequence++; |
177 | smp_wmb(); | 172 | smp_wmb(); |
diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c index 79568466b578..f4c6d02421d3 100644 --- a/arch/score/kernel/process.c +++ b/arch/score/kernel/process.c | |||
@@ -41,24 +41,6 @@ void machine_halt(void) {} | |||
41 | /* If or when software machine-power-off is implemented, add code here. */ | 41 | /* If or when software machine-power-off is implemented, add code here. */ |
42 | void machine_power_off(void) {} | 42 | void machine_power_off(void) {} |
43 | 43 | ||
44 | /* | ||
45 | * The idle thread. There's no useful work to be | ||
46 | * done, so just try to conserve power and have a | ||
47 | * low exit latency (ie sit in a loop waiting for | ||
48 | * somebody to say that they'd like to reschedule) | ||
49 | */ | ||
50 | void __noreturn cpu_idle(void) | ||
51 | { | ||
52 | /* endless idle loop with no priority at all */ | ||
53 | while (1) { | ||
54 | rcu_idle_enter(); | ||
55 | while (!need_resched()) | ||
56 | barrier(); | ||
57 | rcu_idle_exit(); | ||
58 | schedule_preempt_disabled(); | ||
59 | } | ||
60 | } | ||
61 | |||
62 | void ret_from_fork(void); | 44 | void ret_from_fork(void); |
63 | void ret_from_kernel_thread(void); | 45 | void ret_from_kernel_thread(void); |
64 | 46 | ||
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 5e859633ce69..1ea597c6497a 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig | |||
@@ -33,6 +33,7 @@ config SUPERH | |||
33 | select GENERIC_ATOMIC64 | 33 | select GENERIC_ATOMIC64 |
34 | select GENERIC_IRQ_SHOW | 34 | select GENERIC_IRQ_SHOW |
35 | select GENERIC_SMP_IDLE_THREAD | 35 | select GENERIC_SMP_IDLE_THREAD |
36 | select GENERIC_IDLE_POLL_SETUP | ||
36 | select GENERIC_CLOCKEVENTS | 37 | select GENERIC_CLOCKEVENTS |
37 | select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST | 38 | select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST |
38 | select GENERIC_STRNCPY_FROM_USER | 39 | select GENERIC_STRNCPY_FROM_USER |
@@ -148,9 +149,6 @@ config ARCH_HAS_ILOG2_U32 | |||
148 | config ARCH_HAS_ILOG2_U64 | 149 | config ARCH_HAS_ILOG2_U64 |
149 | def_bool n | 150 | def_bool n |
150 | 151 | ||
151 | config ARCH_HAS_DEFAULT_IDLE | ||
152 | def_bool y | ||
153 | |||
154 | config NO_IOPORT | 152 | config NO_IOPORT |
155 | def_bool !PCI | 153 | def_bool !PCI |
156 | depends on !SH_CAYMAN && !SH_SH4202_MICRODEV && !SH_SHMIN && \ | 154 | depends on !SH_CAYMAN && !SH_SH4202_MICRODEV && !SH_SHMIN && \ |
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h index 7d5ac4e48485..45a93669289d 100644 --- a/arch/sh/include/asm/thread_info.h +++ b/arch/sh/include/asm/thread_info.h | |||
@@ -207,8 +207,6 @@ static inline bool test_and_clear_restore_sigmask(void) | |||
207 | return true; | 207 | return true; |
208 | } | 208 | } |
209 | 209 | ||
210 | #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) | ||
211 | |||
212 | #endif /* !__ASSEMBLY__ */ | 210 | #endif /* !__ASSEMBLY__ */ |
213 | 211 | ||
214 | #endif /* __KERNEL__ */ | 212 | #endif /* __KERNEL__ */ |
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c index 3d5a1b387cc0..2ea4483fd722 100644 --- a/arch/sh/kernel/idle.c +++ b/arch/sh/kernel/idle.c | |||
@@ -24,98 +24,24 @@ | |||
24 | 24 | ||
25 | static void (*sh_idle)(void); | 25 | static void (*sh_idle)(void); |
26 | 26 | ||
27 | static int hlt_counter; | 27 | void default_idle(void) |
28 | |||
29 | static int __init nohlt_setup(char *__unused) | ||
30 | { | ||
31 | hlt_counter = 1; | ||
32 | return 1; | ||
33 | } | ||
34 | __setup("nohlt", nohlt_setup); | ||
35 | |||
36 | static int __init hlt_setup(char *__unused) | ||
37 | { | ||
38 | hlt_counter = 0; | ||
39 | return 1; | ||
40 | } | ||
41 | __setup("hlt", hlt_setup); | ||
42 | |||
43 | static inline int hlt_works(void) | ||
44 | { | ||
45 | return !hlt_counter; | ||
46 | } | ||
47 | |||
48 | /* | ||
49 | * On SMP it's slightly faster (but much more power-consuming!) | ||
50 | * to poll the ->work.need_resched flag instead of waiting for the | ||
51 | * cross-CPU IPI to arrive. Use this option with caution. | ||
52 | */ | ||
53 | static void poll_idle(void) | ||
54 | { | 28 | { |
29 | set_bl_bit(); | ||
55 | local_irq_enable(); | 30 | local_irq_enable(); |
56 | while (!need_resched()) | 31 | /* Isn't this racy ? */ |
57 | cpu_relax(); | 32 | cpu_sleep(); |
33 | clear_bl_bit(); | ||
58 | } | 34 | } |
59 | 35 | ||
60 | void default_idle(void) | 36 | void arch_cpu_idle_dead(void) |
61 | { | 37 | { |
62 | if (hlt_works()) { | 38 | play_dead(); |
63 | clear_thread_flag(TIF_POLLING_NRFLAG); | ||
64 | smp_mb__after_clear_bit(); | ||
65 | |||
66 | set_bl_bit(); | ||
67 | if (!need_resched()) { | ||
68 | local_irq_enable(); | ||
69 | cpu_sleep(); | ||
70 | } else | ||
71 | local_irq_enable(); | ||
72 | |||
73 | set_thread_flag(TIF_POLLING_NRFLAG); | ||
74 | clear_bl_bit(); | ||
75 | } else | ||
76 | poll_idle(); | ||
77 | } | 39 | } |
78 | 40 | ||
79 | /* | 41 | void arch_cpu_idle(void) |
80 | * The idle thread. There's no useful work to be done, so just try to conserve | ||
81 | * power and have a low exit latency (ie sit in a loop waiting for somebody to | ||
82 | * say that they'd like to reschedule) | ||
83 | */ | ||
84 | void cpu_idle(void) | ||
85 | { | 42 | { |
86 | unsigned int cpu = smp_processor_id(); | 43 | if (cpuidle_idle_call()) |
87 | 44 | sh_idle(); | |
88 | set_thread_flag(TIF_POLLING_NRFLAG); | ||
89 | |||
90 | /* endless idle loop with no priority at all */ | ||
91 | while (1) { | ||
92 | tick_nohz_idle_enter(); | ||
93 | rcu_idle_enter(); | ||
94 | |||
95 | while (!need_resched()) { | ||
96 | check_pgt_cache(); | ||
97 | rmb(); | ||
98 | |||
99 | if (cpu_is_offline(cpu)) | ||
100 | play_dead(); | ||
101 | |||
102 | local_irq_disable(); | ||
103 | /* Don't trace irqs off for idle */ | ||
104 | stop_critical_timings(); | ||
105 | if (cpuidle_idle_call()) | ||
106 | sh_idle(); | ||
107 | /* | ||
108 | * Sanity check to ensure that sh_idle() returns | ||
109 | * with IRQs enabled | ||
110 | */ | ||
111 | WARN_ON(irqs_disabled()); | ||
112 | start_critical_timings(); | ||
113 | } | ||
114 | |||
115 | rcu_idle_exit(); | ||
116 | tick_nohz_idle_exit(); | ||
117 | schedule_preempt_disabled(); | ||
118 | } | ||
119 | } | 45 | } |
120 | 46 | ||
121 | void __init select_idle_routine(void) | 47 | void __init select_idle_routine(void) |
@@ -123,13 +49,8 @@ void __init select_idle_routine(void) | |||
123 | /* | 49 | /* |
124 | * If a platform has set its own idle routine, leave it alone. | 50 | * If a platform has set its own idle routine, leave it alone. |
125 | */ | 51 | */ |
126 | if (sh_idle) | 52 | if (!sh_idle) |
127 | return; | ||
128 | |||
129 | if (hlt_works()) | ||
130 | sh_idle = default_idle; | 53 | sh_idle = default_idle; |
131 | else | ||
132 | sh_idle = poll_idle; | ||
133 | } | 54 | } |
134 | 55 | ||
135 | void stop_this_cpu(void *unused) | 56 | void stop_this_cpu(void *unused) |
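Two things are worth flagging in the sh conversion. First, select_idle_routine() no longer chooses between halt and poll — that decision now belongs to the core poll control — so it only fills in default_idle() when no platform routine was installed. Second, the new "Isn't this racy ?" comment is accurate: enabling interrupts before cpu_sleep() opens a window in which the wakeup interrupt can fire first, after which the CPU sleeps until some later, unrelated interrupt. The classic fix is a sleep that re-enables interrupts atomically; x86's safe_halt(), for comparison, relies on the one-instruction interrupt shadow of sti (a general illustration, not part of this diff):

    /* sketch of the race the comment points at: */
    local_irq_enable();
    /* <-- wakeup IRQ may land here, setting need_resched() ... */
    cpu_sleep();        /* ... yet the CPU still goes to sleep */

    /* how x86 sidesteps it: interrupts are recognized only once the
     * hlt executes, so the wakeup cannot be lost */
    static inline void x86_safe_halt_like(void)
    {
            asm volatile("sti; hlt" : : : "memory");
    }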
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c index 2062aa88af41..45696451f0ea 100644 --- a/arch/sh/kernel/smp.c +++ b/arch/sh/kernel/smp.c | |||
@@ -203,7 +203,7 @@ asmlinkage void __cpuinit start_secondary(void) | |||
203 | set_cpu_online(cpu, true); | 203 | set_cpu_online(cpu, true); |
204 | per_cpu(cpu_state, cpu) = CPU_ONLINE; | 204 | per_cpu(cpu_state, cpu) = CPU_ONLINE; |
205 | 205 | ||
206 | cpu_idle(); | 206 | cpu_startup_entry(CPUHP_ONLINE); |
207 | } | 207 | } |
208 | 208 | ||
209 | extern struct { | 209 | extern struct { |
diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h index 25849ae3e900..dd3807599bb9 100644 --- a/arch/sparc/include/asm/thread_info_32.h +++ b/arch/sparc/include/asm/thread_info_32.h | |||
@@ -132,8 +132,6 @@ register struct thread_info *current_thread_info_reg asm("g6"); | |||
132 | #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | \ | 132 | #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | \ |
133 | _TIF_SIGPENDING) | 133 | _TIF_SIGPENDING) |
134 | 134 | ||
135 | #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) | ||
136 | |||
137 | #endif /* __KERNEL__ */ | 135 | #endif /* __KERNEL__ */ |
138 | 136 | ||
139 | #endif /* _ASM_THREAD_INFO_H */ | 137 | #endif /* _ASM_THREAD_INFO_H */ |
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h index 269bd92313df..d5e504251079 100644 --- a/arch/sparc/include/asm/thread_info_64.h +++ b/arch/sparc/include/asm/thread_info_64.h | |||
@@ -256,8 +256,6 @@ static inline bool test_and_clear_restore_sigmask(void) | |||
256 | return true; | 256 | return true; |
257 | } | 257 | } |
258 | 258 | ||
259 | #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) | ||
260 | |||
261 | #define thread32_stack_is_64bit(__SP) (((__SP) & 0x1) != 0) | 259 | #define thread32_stack_is_64bit(__SP) (((__SP) & 0x1) != 0) |
262 | #define test_thread_64bit_stack(__SP) \ | 260 | #define test_thread_64bit_stack(__SP) \ |
263 | ((test_thread_flag(TIF_32BIT) && !thread32_stack_is_64bit(__SP)) ? \ | 261 | ((test_thread_flag(TIF_32BIT) && !thread32_stack_is_64bit(__SP)) ? \ |
diff --git a/arch/sparc/kernel/hvtramp.S b/arch/sparc/kernel/hvtramp.S index 9365432904d6..605c960b2fa6 100644 --- a/arch/sparc/kernel/hvtramp.S +++ b/arch/sparc/kernel/hvtramp.S | |||
@@ -128,8 +128,7 @@ hv_cpu_startup: | |||
128 | 128 | ||
129 | call smp_callin | 129 | call smp_callin |
130 | nop | 130 | nop |
131 | call cpu_idle | 131 | |
132 | mov 0, %o0 | ||
133 | call cpu_panic | 132 | call cpu_panic |
134 | nop | 133 | nop |
135 | 134 | ||
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c index 62eede13831a..c85241006e32 100644 --- a/arch/sparc/kernel/process_32.c +++ b/arch/sparc/kernel/process_32.c | |||
@@ -64,23 +64,12 @@ extern void fpsave(unsigned long *, unsigned long *, void *, unsigned long *); | |||
64 | struct task_struct *last_task_used_math = NULL; | 64 | struct task_struct *last_task_used_math = NULL; |
65 | struct thread_info *current_set[NR_CPUS]; | 65 | struct thread_info *current_set[NR_CPUS]; |
66 | 66 | ||
67 | /* | 67 | /* Idle loop support. */ |
68 | * the idle loop on a Sparc... ;) | 68 | void arch_cpu_idle(void) |
69 | */ | ||
70 | void cpu_idle(void) | ||
71 | { | 69 | { |
72 | set_thread_flag(TIF_POLLING_NRFLAG); | 70 | if (sparc_idle) |
73 | 71 | (*sparc_idle)(); | |
74 | /* endless idle loop with no priority at all */ | 72 | local_irq_enable(); |
75 | for (;;) { | ||
76 | while (!need_resched()) { | ||
77 | if (sparc_idle) | ||
78 | (*sparc_idle)(); | ||
79 | else | ||
80 | cpu_relax(); | ||
81 | } | ||
82 | schedule_preempt_disabled(); | ||
83 | } | ||
84 | } | 73 | } |
85 | 74 | ||
86 | /* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */ | 75 | /* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */ |
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index cdb80b2adbe0..9fbf0d14a361 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c | |||
@@ -52,20 +52,17 @@ | |||
52 | 52 | ||
53 | #include "kstack.h" | 53 | #include "kstack.h" |
54 | 54 | ||
55 | static void sparc64_yield(int cpu) | 55 | /* Idle loop support on sparc64. */ |
56 | void arch_cpu_idle(void) | ||
56 | { | 57 | { |
57 | if (tlb_type != hypervisor) { | 58 | if (tlb_type != hypervisor) { |
58 | touch_nmi_watchdog(); | 59 | touch_nmi_watchdog(); |
59 | return; | 60 | } else { |
60 | } | ||
61 | |||
62 | clear_thread_flag(TIF_POLLING_NRFLAG); | ||
63 | smp_mb__after_clear_bit(); | ||
64 | |||
65 | while (!need_resched() && !cpu_is_offline(cpu)) { | ||
66 | unsigned long pstate; | 61 | unsigned long pstate; |
67 | 62 | ||
68 | /* Disable interrupts. */ | 63 | /* The sun4v sleeping code requires that we have PSTATE.IE cleared over |
64 | * the cpu sleep hypervisor call. | ||
65 | */ | ||
69 | __asm__ __volatile__( | 66 | __asm__ __volatile__( |
70 | "rdpr %%pstate, %0\n\t" | 67 | "rdpr %%pstate, %0\n\t" |
71 | "andn %0, %1, %0\n\t" | 68 | "andn %0, %1, %0\n\t" |
@@ -73,7 +70,7 @@ static void sparc64_yield(int cpu) | |||
73 | : "=&r" (pstate) | 70 | : "=&r" (pstate) |
74 | : "i" (PSTATE_IE)); | 71 | : "i" (PSTATE_IE)); |
75 | 72 | ||
76 | if (!need_resched() && !cpu_is_offline(cpu)) | 73 | if (!need_resched() && !cpu_is_offline(smp_processor_id())) |
77 | sun4v_cpu_yield(); | 74 | sun4v_cpu_yield(); |
78 | 75 | ||
79 | /* Re-enable interrupts. */ | 76 | /* Re-enable interrupts. */ |
@@ -84,36 +81,16 @@ static void sparc64_yield(int cpu) | |||
84 | : "=&r" (pstate) | 81 | : "=&r" (pstate) |
85 | : "i" (PSTATE_IE)); | 82 | : "i" (PSTATE_IE)); |
86 | } | 83 | } |
87 | 84 | local_irq_enable(); | |
88 | set_thread_flag(TIF_POLLING_NRFLAG); | ||
89 | } | 85 | } |
90 | 86 | ||
91 | /* The idle loop on sparc64. */ | ||
92 | void cpu_idle(void) | ||
93 | { | ||
94 | int cpu = smp_processor_id(); | ||
95 | |||
96 | set_thread_flag(TIF_POLLING_NRFLAG); | ||
97 | |||
98 | while(1) { | ||
99 | tick_nohz_idle_enter(); | ||
100 | rcu_idle_enter(); | ||
101 | |||
102 | while (!need_resched() && !cpu_is_offline(cpu)) | ||
103 | sparc64_yield(cpu); | ||
104 | |||
105 | rcu_idle_exit(); | ||
106 | tick_nohz_idle_exit(); | ||
107 | |||
108 | #ifdef CONFIG_HOTPLUG_CPU | 87 | #ifdef CONFIG_HOTPLUG_CPU |
109 | if (cpu_is_offline(cpu)) { | 88 | void arch_cpu_idle_dead() |
110 | sched_preempt_enable_no_resched(); | 89 | { |
111 | cpu_play_dead(); | 90 | sched_preempt_enable_no_resched(); |
112 | } | 91 | cpu_play_dead(); |
113 | #endif | ||
114 | schedule_preempt_disabled(); | ||
115 | } | ||
116 | } | 92 | } |
93 | #endif | ||
117 | 94 | ||
118 | #ifdef CONFIG_COMPAT | 95 | #ifdef CONFIG_COMPAT |
119 | static void show_regwindow32(struct pt_regs *regs) | 96 | static void show_regwindow32(struct pt_regs *regs) |
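The sparc64 routine shows the other standard way to avoid a lost wakeup: clear the interrupt-enable bit, re-check need_resched(), and only then issue the sleeping hypervisor call, so a reschedule IPI cannot slip in between the check and the sleep. As a generic pattern:

    /* sketch of the check-then-sleep pattern used above: */
    local_irq_disable();
    if (!need_resched() && !cpu_is_offline(smp_processor_id()))
            do_sleep();     /* hypothetical stand-in for sun4v_cpu_yield() */
    local_irq_enable();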
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c index 9e7e6d718367..e3f2b81c23f1 100644 --- a/arch/sparc/kernel/smp_32.c +++ b/arch/sparc/kernel/smp_32.c | |||
@@ -369,7 +369,7 @@ void __cpuinit sparc_start_secondary(void *arg) | |||
369 | local_irq_enable(); | 369 | local_irq_enable(); |
370 | 370 | ||
371 | wmb(); | 371 | wmb(); |
372 | cpu_idle(); | 372 | cpu_startup_entry(CPUHP_ONLINE); |
373 | 373 | ||
374 | /* We should never reach here! */ | 374 | /* We should never reach here! */ |
375 | BUG(); | 375 | BUG(); |
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index ca64d2a86ec0..77539eda928c 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c | |||
@@ -127,6 +127,8 @@ void __cpuinit smp_callin(void) | |||
127 | 127 | ||
128 | /* idle thread is expected to have preempt disabled */ | 128 | /* idle thread is expected to have preempt disabled */ |
129 | preempt_disable(); | 129 | preempt_disable(); |
130 | |||
131 | cpu_startup_entry(CPUHP_ONLINE); | ||
130 | } | 132 | } |
131 | 133 | ||
132 | void cpu_panic(void) | 134 | void cpu_panic(void) |
diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S index da1b781b5e65..2e973a26fbda 100644 --- a/arch/sparc/kernel/trampoline_64.S +++ b/arch/sparc/kernel/trampoline_64.S | |||
@@ -407,8 +407,7 @@ after_lock_tlb: | |||
407 | 407 | ||
408 | call smp_callin | 408 | call smp_callin |
409 | nop | 409 | nop |
410 | call cpu_idle | 410 | |
411 | mov 0, %o0 | ||
412 | call cpu_panic | 411 | call cpu_panic |
413 | nop | 412 | nop |
414 | 1: b,a,pt %xcc, 1b | 413 | 1: b,a,pt %xcc, 1b |
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h index e9c670d7a7fe..ccc8ef37235c 100644 --- a/arch/tile/include/asm/thread_info.h +++ b/arch/tile/include/asm/thread_info.h | |||
@@ -153,8 +153,6 @@ extern void _cpu_idle(void); | |||
153 | #define TS_POLLING 0x0004 /* in idle loop but not sleeping */ | 153 | #define TS_POLLING 0x0004 /* in idle loop but not sleeping */ |
154 | #define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal */ | 154 | #define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal */ |
155 | 155 | ||
156 | #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) | ||
157 | |||
158 | #ifndef __ASSEMBLY__ | 156 | #ifndef __ASSEMBLY__ |
159 | #define HAVE_SET_RESTORE_SIGMASK 1 | 157 | #define HAVE_SET_RESTORE_SIGMASK 1 |
160 | static inline void set_restore_sigmask(void) | 158 | static inline void set_restore_sigmask(void) |
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c index caf93ae11793..80b2a18deb87 100644 --- a/arch/tile/kernel/process.c +++ b/arch/tile/kernel/process.c | |||
@@ -40,13 +40,11 @@ | |||
40 | #include <arch/abi.h> | 40 | #include <arch/abi.h> |
41 | #include <arch/sim_def.h> | 41 | #include <arch/sim_def.h> |
42 | 42 | ||
43 | |||
44 | /* | 43 | /* |
45 | * Use the (x86) "idle=poll" option to prefer low latency when leaving the | 44 | * Use the (x86) "idle=poll" option to prefer low latency when leaving the |
46 | * idle loop over low power while in the idle loop, e.g. if we have | 45 | * idle loop over low power while in the idle loop, e.g. if we have |
47 | * one thread per core and we want to get threads out of futex waits fast. | 46 | * one thread per core and we want to get threads out of futex waits fast. |
48 | */ | 47 | */ |
49 | static int no_idle_nap; | ||
50 | static int __init idle_setup(char *str) | 48 | static int __init idle_setup(char *str) |
51 | { | 49 | { |
52 | if (!str) | 50 | if (!str) |
@@ -54,64 +52,19 @@ static int __init idle_setup(char *str) | |||
54 | 52 | ||
55 | if (!strcmp(str, "poll")) { | 53 | if (!strcmp(str, "poll")) { |
56 | pr_info("using polling idle threads.\n"); | 54 | pr_info("using polling idle threads.\n"); |
57 | no_idle_nap = 1; | 55 | cpu_idle_poll_ctrl(true); |
58 | } else if (!strcmp(str, "halt")) | 56 | return 0; |
59 | no_idle_nap = 0; | 57 | } else if (!strcmp(str, "halt")) { |
60 | else | 58 | return 0; |
61 | return -1; | 59 | } |
62 | 60 | return -1; | |
63 | return 0; | ||
64 | } | 61 | } |
65 | early_param("idle", idle_setup); | 62 | early_param("idle", idle_setup); |
66 | 63 | ||
67 | /* | 64 | void arch_cpu_idle(void) |
68 | * The idle thread. There's no useful work to be | ||
69 | * done, so just try to conserve power and have a | ||
70 | * low exit latency (ie sit in a loop waiting for | ||
71 | * somebody to say that they'd like to reschedule) | ||
72 | */ | ||
73 | void cpu_idle(void) | ||
74 | { | 65 | { |
75 | int cpu = smp_processor_id(); | 66 | __get_cpu_var(irq_stat).idle_timestamp = jiffies; |
76 | 67 | _cpu_idle(); | |
77 | |||
78 | current_thread_info()->status |= TS_POLLING; | ||
79 | |||
80 | if (no_idle_nap) { | ||
81 | while (1) { | ||
82 | while (!need_resched()) | ||
83 | cpu_relax(); | ||
84 | schedule(); | ||
85 | } | ||
86 | } | ||
87 | |||
88 | /* endless idle loop with no priority at all */ | ||
89 | while (1) { | ||
90 | tick_nohz_idle_enter(); | ||
91 | rcu_idle_enter(); | ||
92 | while (!need_resched()) { | ||
93 | if (cpu_is_offline(cpu)) | ||
94 | BUG(); /* no HOTPLUG_CPU */ | ||
95 | |||
96 | local_irq_disable(); | ||
97 | __get_cpu_var(irq_stat).idle_timestamp = jiffies; | ||
98 | current_thread_info()->status &= ~TS_POLLING; | ||
99 | /* | ||
100 | * TS_POLLING-cleared state must be visible before we | ||
101 | * test NEED_RESCHED: | ||
102 | */ | ||
103 | smp_mb(); | ||
104 | |||
105 | if (!need_resched()) | ||
106 | _cpu_idle(); | ||
107 | else | ||
108 | local_irq_enable(); | ||
109 | current_thread_info()->status |= TS_POLLING; | ||
110 | } | ||
111 | rcu_idle_exit(); | ||
112 | tick_nohz_idle_exit(); | ||
113 | schedule_preempt_disabled(); | ||
114 | } | ||
115 | } | 68 | } |
116 | 69 | ||
117 | /* | 70 | /* |
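tile's "idle=poll" handling becomes a one-line call into the core. For reference, any platform code preferring wakeup latency over power could do the same at boot — my_board_init is a hypothetical example, not from this diff:

    /* hypothetical usage of the new core API: */
    static int __init my_board_init(void)
    {
            cpu_idle_poll_ctrl(true);       /* force the polling idle loop */
            return 0;
    }
    early_initcall(my_board_init);

mn10300's disable_hlt()/enable_hlt() pair around CPU hotplug (deleted earlier in this diff) is the same knob in reference-counted form, which is presumably why cpu_idle_poll_ctrl() maintains a counter rather than a flag.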
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c index e686c5ac90be..44bab29bf2f3 100644 --- a/arch/tile/kernel/smpboot.c +++ b/arch/tile/kernel/smpboot.c | |||
@@ -207,9 +207,7 @@ void __cpuinit online_secondary(void) | |||
207 | /* Set up tile-timer clock-event device on this cpu */ | 207 | /* Set up tile-timer clock-event device on this cpu */ |
208 | setup_tile_timer(); | 208 | setup_tile_timer(); |
209 | 209 | ||
210 | preempt_enable(); | 210 | cpu_startup_entry(CPUHP_ONLINE); |
211 | |||
212 | cpu_idle(); | ||
213 | } | 211 | } |
214 | 212 | ||
215 | int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) | 213 | int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) |
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index b462b13c5bae..bbcef522bcb1 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c | |||
@@ -210,33 +210,14 @@ void initial_thread_cb(void (*proc)(void *), void *arg) | |||
210 | kmalloc_ok = save_kmalloc_ok; | 210 | kmalloc_ok = save_kmalloc_ok; |
211 | } | 211 | } |
212 | 212 | ||
213 | void default_idle(void) | 213 | void arch_cpu_idle(void) |
214 | { | 214 | { |
215 | unsigned long long nsecs; | 215 | unsigned long long nsecs; |
216 | 216 | ||
217 | while (1) { | ||
218 | /* endless idle loop with no priority at all */ | ||
219 | |||
220 | /* | ||
221 | * although we are an idle CPU, we do not want to | ||
222 | * get into the scheduler unnecessarily. | ||
223 | */ | ||
224 | if (need_resched()) | ||
225 | schedule(); | ||
226 | |||
227 | tick_nohz_idle_enter(); | ||
228 | rcu_idle_enter(); | ||
229 | nsecs = disable_timer(); | ||
230 | idle_sleep(nsecs); | ||
231 | rcu_idle_exit(); | ||
232 | tick_nohz_idle_exit(); | ||
233 | } | ||
234 | } | ||
235 | |||
236 | void cpu_idle(void) | ||
237 | { | ||
238 | cpu_tasks[current_thread_info()->cpu].pid = os_getpid(); | 217 | cpu_tasks[current_thread_info()->cpu].pid = os_getpid(); |
239 | default_idle(); | 218 | nsecs = disable_timer(); |
219 | idle_sleep(nsecs); | ||
220 | local_irq_enable(); | ||
240 | } | 221 | } |
241 | 222 | ||
242 | int __cant_sleep(void) { | 223 | int __cant_sleep(void) { |
diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c index 872d7e22d847..7fab86d7c5d4 100644 --- a/arch/unicore32/kernel/process.c +++ b/arch/unicore32/kernel/process.c | |||
@@ -45,25 +45,10 @@ static const char * const processor_modes[] = { | |||
45 | "UK18", "UK19", "UK1A", "EXTN", "UK1C", "UK1D", "UK1E", "SUSR" | 45 | "UK18", "UK19", "UK1A", "EXTN", "UK1C", "UK1D", "UK1E", "SUSR" |
46 | }; | 46 | }; |
47 | 47 | ||
48 | void cpu_idle(void) | 48 | void arch_cpu_idle(void) |
49 | { | 49 | { |
50 | /* endless idle loop with no priority at all */ | 50 | cpu_do_idle(); |
51 | while (1) { | 51 | local_irq_enable(); |
52 | tick_nohz_idle_enter(); | ||
53 | rcu_idle_enter(); | ||
54 | while (!need_resched()) { | ||
55 | local_irq_disable(); | ||
56 | stop_critical_timings(); | ||
57 | cpu_do_idle(); | ||
58 | local_irq_enable(); | ||
59 | start_critical_timings(); | ||
60 | } | ||
61 | rcu_idle_exit(); | ||
62 | tick_nohz_idle_exit(); | ||
63 | preempt_enable_no_resched(); | ||
64 | schedule(); | ||
65 | preempt_disable(); | ||
66 | } | ||
67 | } | 52 | } |
68 | 53 | ||
69 | static char reboot_mode = 'h'; | 54 | static char reboot_mode = 'h'; |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 15b5cef4aa38..d75b48c11be5 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -188,9 +188,6 @@ config GENERIC_CALIBRATE_DELAY
 config ARCH_HAS_CPU_RELAX
 	def_bool y
 
-config ARCH_HAS_DEFAULT_IDLE
-	def_bool y
-
 config ARCH_HAS_CACHE_LINE_SIZE
 	def_bool y
 
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 2cd056e3ada3..a1df6e84691f 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -241,8 +241,6 @@ static inline struct thread_info *current_thread_info(void)
 					  skip sending interrupt */
 #define TS_RESTORE_SIGMASK	0x0008	/* restore signal mask in do_signal() */
 
-#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
-
 #ifndef __ASSEMBLY__
 #define HAVE_SET_RESTORE_SIGMASK	1
 static inline void set_restore_sigmask(void)
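The per-arch tsk_is_polling() copies removed here (and in the alpha hunk earlier) are replaced by one generic definition introduced earlier in this series, keyed off whichever polling mechanism an architecture exposes. Its assumed form (approximate):

/*
 * Assumed consolidated tsk_is_polling() (include/linux/sched.h):
 * x86 and alpha use the TS_POLLING status bit, most other
 * architectures a TIF flag, the rest never poll.
 */
#ifdef TS_POLLING
# define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
#elif defined(TIF_POLLING_NRFLAG)
# define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
#else
# define tsk_is_polling(t) 0
#endif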
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 14ae10031ff0..6833bffaadb7 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -301,13 +301,7 @@ void exit_idle(void)
 }
 #endif
 
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle(void)
+void arch_cpu_idle_prepare(void)
 {
 	/*
 	 * If we're the non-boot CPU, nothing set the stack canary up
@@ -317,71 +311,40 @@ void cpu_idle(void)
 	 * canaries already on the stack wont ever trigger).
 	 */
 	boot_init_stack_canary();
-	current_thread_info()->status |= TS_POLLING;
-
-	while (1) {
-		tick_nohz_idle_enter();
-
-		while (!need_resched()) {
-			rmb();
-
-			if (cpu_is_offline(smp_processor_id()))
-				play_dead();
-
-			/*
-			 * Idle routines should keep interrupts disabled
-			 * from here on, until they go to idle.
-			 * Otherwise, idle callbacks can misfire.
-			 */
-			local_touch_nmi();
-			local_irq_disable();
-
-			enter_idle();
-
-			/* Don't trace irqs off for idle */
-			stop_critical_timings();
-
-			/* enter_idle() needs rcu for notifiers */
-			rcu_idle_enter();
+}
 
-			if (cpuidle_idle_call())
-				x86_idle();
+void arch_cpu_idle_enter(void)
+{
+	local_touch_nmi();
+	enter_idle();
+}
 
-			rcu_idle_exit();
-			start_critical_timings();
+void arch_cpu_idle_exit(void)
+{
+	__exit_idle();
+}
 
-			/* In many cases the interrupt that ended idle
-			   has already called exit_idle. But some idle
-			   loops can be woken up without interrupt. */
-			__exit_idle();
-		}
+void arch_cpu_idle_dead(void)
+{
+	play_dead();
+}
 
-		tick_nohz_idle_exit();
-		preempt_enable_no_resched();
-		schedule();
-		preempt_disable();
-	}
+/*
+ * Called from the generic idle code.
+ */
+void arch_cpu_idle(void)
+{
+	if (cpuidle_idle_call())
+		x86_idle();
 }
 
 /*
- * We use this if we don't have any better
- * idle routine..
+ * We use this if we don't have any better idle routine..
  */
 void default_idle(void)
 {
 	trace_cpu_idle_rcuidle(1, smp_processor_id());
-	current_thread_info()->status &= ~TS_POLLING;
-	/*
-	 * TS_POLLING-cleared state must be visible before we
-	 * test NEED_RESCHED:
-	 */
-	smp_mb();
-
-	if (!need_resched())
-		safe_halt();	/* enables interrupts racelessly */
-	else
-		local_irq_enable();
-	current_thread_info()->status |= TS_POLLING;
+	safe_halt();
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 #ifdef CONFIG_APM_MODULE
@@ -411,20 +374,6 @@ void stop_this_cpu(void *dummy)
 		halt();
 }
 
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->work.need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle(void)
-{
-	trace_cpu_idle_rcuidle(0, smp_processor_id());
-	local_irq_enable();
-	while (!need_resched())
-		cpu_relax();
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
-}
-
 bool amd_e400_c1e_detected;
 EXPORT_SYMBOL(amd_e400_c1e_detected);
 
@@ -489,10 +438,10 @@ static void amd_e400_idle(void)
 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
-	if (x86_idle == poll_idle && smp_num_siblings > 1)
+	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
 		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
 #endif
-	if (x86_idle)
+	if (x86_idle || boot_option_idle_override == IDLE_POLL)
 		return;
 
 	if (cpu_has_amd_erratum(amd_erratum_400)) {
@@ -517,8 +466,8 @@ static int __init idle_setup(char *str)
 
 	if (!strcmp(str, "poll")) {
 		pr_info("using polling idle threads\n");
-		x86_idle = poll_idle;
 		boot_option_idle_override = IDLE_POLL;
+		cpu_idle_poll_ctrl(true);
 	} else if (!strcmp(str, "halt")) {
 		/*
 		 * When the boot option of idle=halt is added, halt is
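poll_idle() disappears from x86 but not from the kernel: "idle=poll" now calls cpu_idle_poll_ctrl(true) and the generic loop does the polling itself. A hedged sketch of the generic side (names as in kernel/cpu/idle.c; bodies abbreviated and approximate):

/*
 * Sketch of the generic replacement for the deleted x86
 * poll_idle(); treat exact bodies as approximate.
 */
static int cpu_idle_force_poll;		/* a count, not a bool */

void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}

/* Generic equivalent of the deleted poll_idle() */
static int cpu_idle_poll(void)
{
	rcu_idle_enter();
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	rcu_idle_exit();
	return 1;
}

Making the override a count rather than a flag lets runtime callers stack enable/disable requests on top of the boot option without clobbering it.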
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 9f190a2a00e9..9c73b51817e4 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -284,7 +284,7 @@ notrace static void __cpuinit start_secondary(void *unused)
 	x86_cpuinit.setup_percpu_clockev();
 
 	wmb();
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 }
 
 void __init smp_store_boot_cpu_info(void)
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 0d466d7c7175..8ff37995d54e 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -95,7 +95,7 @@ static void __cpuinit cpu_bringup(void)
 static void __cpuinit cpu_bringup_and_idle(void)
 {
 	cpu_bringup();
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 }
 
 static int xen_smp_intr_init(unsigned int cpu)
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 5cd82e9f601c..1c85323f01d7 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -105,19 +105,9 @@ void coprocessor_flush_all(struct thread_info *ti)
 /*
  * Powermanagement idle function, if any is provided by the platform.
  */
-
-void cpu_idle(void)
+void arch_cpu_idle(void)
 {
-	local_irq_enable();
-
-	/* endless idle loop with no priority at all */
-	while (1) {
-		rcu_idle_enter();
-		while (!need_resched())
-			platform_idle();
-		rcu_idle_exit();
-		schedule_preempt_disabled();
-	}
+	platform_idle();
 }
 
 /*