author		Paul Mackerras <paulus@samba.org>	2009-01-09 00:52:19 -0500
committer	Paul Mackerras <paulus@samba.org>	2009-01-09 03:48:17 -0500
commit		93a6d3ce6962044fe9badf528fed46b455d58292 (patch)
tree		e3941058ddb4117b4598b8d94fb218cfeb9e2fa8 /arch/powerpc/include/asm/hw_irq.h
parent		d662ed26734473d4cb5f3d78cebfec8f9126e97c (diff)
powerpc: Provide a way to defer perf counter work until interrupts are enabled
Because 64-bit powerpc uses lazy (soft) interrupt disabling, it is possible for a performance monitor exception to come in when the kernel thinks interrupts are disabled (i.e. when they are soft-disabled but hard-enabled).  In such a situation the performance monitor exception handler might have some processing to do (such as process wakeups) which can't be done in what is effectively an NMI handler.

This provides a way to defer that work until interrupts get enabled, either in raw_local_irq_restore() or by returning from an interrupt handler to code that had interrupts enabled.  We have a per-processor flag that indicates that there is work pending to do when interrupts subsequently get re-enabled.  This flag is checked in the interrupt return path and in raw_local_irq_restore(), and if it is set, perf_counter_do_pending() is called to do the pending work.

Signed-off-by: Paul Mackerras <paulus@samba.org>
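For context, a simplified sketch of the consumer side follows.  This is illustrative only and not part of the header change below; the real implementation is in arch/powerpc/kernel/irq.c and handles additional cases, so the ordering and surrounding details here are assumptions.

	/*
	 * Sketch only: how the pending flag could be consumed when
	 * interrupts are soft-enabled again.  Simplified from the real
	 * code in arch/powerpc/kernel/irq.c.
	 */
	notrace void raw_local_irq_restore(unsigned long en)
	{
		get_paca()->soft_enabled = en;	/* record new soft state */
		if (!en)
			return;		/* still soft-disabled: keep deferring */

	#ifdef CONFIG_PERF_COUNTERS
		if (get_perf_counter_pending()) {
			set_perf_counter_pending(0);	/* consume the flag */
			perf_counter_do_pending();	/* deferred wakeups etc. */
		}
	#endif

		/* ... hard-enable the interrupt line and replay anything pending ... */
		__hard_irq_enable();
	}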
Diffstat (limited to 'arch/powerpc/include/asm/hw_irq.h')
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h | 31 +++++++++++++++++++++++++++++++
1 file changed, 31 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index f75a5fc64d2e..e10f151c3db6 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -131,5 +131,36 @@ static inline int irqs_disabled_flags(unsigned long flags)
  */
 struct hw_interrupt_type;
 
+#ifdef CONFIG_PERF_COUNTERS
+static inline unsigned long get_perf_counter_pending(void)
+{
+	unsigned long x;
+
+	asm volatile("lbz %0,%1(13)"
+		: "=r" (x)
+		: "i" (offsetof(struct paca_struct, perf_counter_pending)));
+	return x;
+}
+
+static inline void set_perf_counter_pending(int x)
+{
+	asm volatile("stb %0,%1(13)" : :
+		"r" (x),
+		"i" (offsetof(struct paca_struct, perf_counter_pending)));
+}
+
+extern void perf_counter_do_pending(void);
+
+#else
+
+static inline unsigned long get_perf_counter_pending(void)
+{
+	return 0;
+}
+
+static inline void set_perf_counter_pending(int x) {}
+static inline void perf_counter_do_pending(void) {}
+#endif /* CONFIG_PERF_COUNTERS */
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_HW_IRQ_H */
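On 64-bit powerpc, register r13 holds the per-CPU paca pointer, so the lbz/stb instructions above load and store the paca's perf_counter_pending byte directly, which is safe even in an NMI-like context.  A performance monitor exception handler would then set the flag instead of doing wakeups in place.  The following producer-side sketch is purely illustrative: only set_perf_counter_pending() comes from this patch; the handler name and overflow test are hypothetical.

	/*
	 * Hypothetical producer side: an exception handler that runs
	 * effectively as an NMI and must defer non-NMI-safe work.
	 */
	static void perf_counter_interrupt(struct pt_regs *regs)
	{
		int need_wakeup = 0;

		/* ... read PMU registers, record samples, detect overflow ... */

		if (need_wakeup) {
			/*
			 * Can't call wake_up() from here: we may have
			 * interrupted a soft-disabled section.  Mark the work
			 * pending; it runs from raw_local_irq_restore() or
			 * the interrupt return path once interrupts are
			 * genuinely enabled.
			 */
			set_perf_counter_pending(1);
		}
	}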