 arch/powerpc/include/asm/hw_irq.h               |  2
 arch/powerpc/kernel/irq.c                       | 46
 arch/powerpc/platforms/cell/pervasive.c         | 11
 arch/powerpc/platforms/pseries/processor_idle.c | 17
 4 files changed, 64 insertions, 12 deletions
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 6eb75b80488c..92224b7fd702 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -125,6 +125,8 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
 	return !regs->softe;
 }
 
+extern bool prep_irq_for_idle(void);
+
 #else /* CONFIG_PPC64 */
 
 #define SET_MSR_EE(x)	mtmsr(x)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 1b415027ec0e..9270a399c9d6 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -286,6 +286,52 @@ void notrace restore_interrupts(void)
 	__hard_irq_enable();
 }
 
+/*
+ * This is a helper to use when about to go into idle low-power
+ * when the latter has the side effect of re-enabling interrupts
+ * (such as calling H_CEDE under pHyp).
+ *
+ * You call this function with interrupts soft-disabled (this is
+ * already the case when ppc_md.power_save is called). The function
+ * will return whether to enter power save or just return.
+ *
+ * In the former case, it will have notified lockdep of interrupts
+ * being re-enabled and generally sanitized the lazy irq state,
+ * and in the latter case it will leave with interrupts hard
+ * disabled and marked as such, so the local_irq_enable() call
+ * in cpu_idle() will properly re-enable everything.
+ */
+bool prep_irq_for_idle(void)
+{
+	/*
+	 * First we need to hard disable to ensure no interrupt
+	 * occurs before we effectively enter the low power state
+	 */
+	hard_irq_disable();
+
+	/*
+	 * If anything happened while we were soft-disabled,
+	 * we return now and do not enter the low power state.
+	 */
+	if (lazy_irq_pending())
+		return false;
+
+	/* Tell lockdep we are about to re-enable */
+	trace_hardirqs_on();
+
+	/*
+	 * Mark interrupts as soft-enabled and clear the
+	 * PACA_IRQ_HARD_DIS from the pending mask since we
+	 * are about to hard enable as well as a side effect
+	 * of entering the low power state.
+	 */
+	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+	local_paca->soft_enabled = 1;
+
+	/* Tell the caller to enter the low power state */
+	return true;
+}
+
 #endif /* CONFIG_PPC64 */
 
 int arch_show_interrupts(struct seq_file *p, int prec)
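The block comment above spells out the calling contract for prep_irq_for_idle(). As a rough illustration only (not part of this patch), a ppc_md.power_save implementation following that contract would look something like the sketch below; the callback name example_power_save() and the enter_low_power() call are hypothetical placeholders for whatever actually enters the low-power state (NAP, H_CEDE, ...):

	/* Hypothetical idle callback, assuming the contract described above */
	static void example_power_save(void)
	{
		/*
		 * We arrive here with interrupts soft-disabled.  If an
		 * interrupt is already pending, prep_irq_for_idle() leaves
		 * interrupts hard disabled and we just return; the
		 * local_irq_enable() in cpu_idle() will replay it.
		 */
		if (!prep_irq_for_idle())
			return;

		/*
		 * Interrupt state is now marked soft-enabled, so entering
		 * the low-power state (which may hard-enable interrupts as
		 * a side effect) keeps the lazy irq accounting consistent.
		 */
		enter_low_power();	/* placeholder: NAP, H_CEDE, etc. */
	}

The two callers converted in the remainder of this patch (cbe_power_save() and check_and_cede_processor()) follow this same pattern.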
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index efdacc829576..d17e98bc0c10 100644
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -42,11 +42,9 @@ static void cbe_power_save(void)
 {
 	unsigned long ctrl, thread_switch_control;
 
-	/*
-	 * We need to hard disable interrupts, the local_irq_enable() done by
-	 * our caller upon return will hard re-enable.
-	 */
-	hard_irq_disable();
+	/* Ensure our interrupt state is properly tracked */
+	if (!prep_irq_for_idle())
+		return;
 
 	ctrl = mfspr(SPRN_CTRLF);
 
@@ -81,6 +79,9 @@ static void cbe_power_save(void)
 	 */
 	ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
 	mtspr(SPRN_CTRLT, ctrl);
+
+	/* Re-enable interrupts in MSR */
+	__hard_irq_enable();
 }
 
 static int cbe_system_reset_exception(struct pt_regs *regs)
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
index e61483e8e960..c71be66bd5dc 100644
--- a/arch/powerpc/platforms/pseries/processor_idle.c
+++ b/arch/powerpc/platforms/pseries/processor_idle.c
@@ -99,15 +99,18 @@ out:
 static void check_and_cede_processor(void)
 {
 	/*
-	 * Interrupts are soft-disabled at this point,
-	 * but not hard disabled. So an interrupt might have
-	 * occurred before entering NAP, and would be potentially
-	 * lost (edge events, decrementer events, etc...) unless
-	 * we first hard disable then check.
+	 * Ensure our interrupt state is properly tracked,
+	 * also checks if no interrupt has occurred while we
+	 * were soft-disabled
 	 */
-	hard_irq_disable();
-	if (!lazy_irq_pending())
+	if (prep_irq_for_idle()) {
 		cede_processor();
+#ifdef CONFIG_TRACE_IRQFLAGS
+		/* Ensure that H_CEDE returns with IRQs on */
+		if (WARN_ON(!(mfmsr() & MSR_EE)))
+			__hard_irq_enable();
+#endif
+	}
 }
 
 static int dedicated_cede_loop(struct cpuidle_device *dev,
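For context only (this helper is not part of the diff above): the "checks if no interrupt has occurred while we were soft-disabled" behaviour that check_and_cede_processor() now gets from prep_irq_for_idle() comes from the lazy_irq_pending() test in asm/hw_irq.h, which looks at the per-CPU irq_happened mask for anything other than the hard-disable bookkeeping bit. A simplified sketch, reproduced from memory of the same kernel series and not guaranteed to match the exact upstream text:

	/* Roughly what lazy_irq_pending() checks (simplified sketch) */
	static inline bool lazy_irq_pending(void)
	{
		/* Any latched interrupt besides the "hard disabled" marker? */
		return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
	}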