author		Paul Mackerras <paulus@samba.org>	2009-01-09 00:52:19 -0500
committer	Paul Mackerras <paulus@samba.org>	2009-01-09 03:48:17 -0500
commit		93a6d3ce6962044fe9badf528fed46b455d58292 (patch)
tree		e3941058ddb4117b4598b8d94fb218cfeb9e2fa8 /arch/powerpc
parent		d662ed26734473d4cb5f3d78cebfec8f9126e97c (diff)
powerpc: Provide a way to defer perf counter work until interrupts are enabled
Because 64-bit powerpc uses lazy (soft) interrupt disabling, it is possible for a performance monitor exception to come in when the kernel thinks interrupts are disabled (i.e. when they are soft-disabled but hard-enabled). In such a situation the performance monitor exception handler might have some processing to do (such as process wakeups) which can't be done in what is effectively an NMI handler.

This provides a way to defer that work until interrupts get enabled, either in raw_local_irq_restore() or by returning from an interrupt handler to code that had interrupts enabled. We have a per-processor flag that indicates that there is work pending to do when interrupts subsequently get re-enabled. This flag is checked in the interrupt return path and in raw_local_irq_restore(), and if it is set, perf_counter_do_pending() is called to do the pending work.

Signed-off-by: Paul Mackerras <paulus@samba.org>
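The mechanism the message describes is a classic deferred-work pattern: an exception handler that may run in effectively-NMI context only sets a per-CPU flag, and the flag is drained on the next transition to interrupts-enabled. Below is a minimal standalone sketch of that pattern with illustrative names; the real flag lives in paca_struct and is accessed through r13, as the hw_irq.h hunk below shows.

    #include <stdio.h>

    /* Models paca->soft_enabled and paca->perf_counter_pending. */
    static int soft_enabled = 1;
    static int perf_counter_pending;

    static void perf_counter_do_pending(void)
    {
            perf_counter_pending = 0;
            printf("deferred perf counter work runs now\n");
    }

    /* The PM exception is hard-enabled, so it can arrive even while
     * soft-disabled; in that case it may only set the flag. */
    static void perf_monitor_exception(void)
    {
            if (!soft_enabled) {
                    perf_counter_pending = 1;
                    return;
            }
            perf_counter_do_pending();
    }

    /* Models raw_local_irq_restore(): re-enabling drains pending work. */
    static void my_irq_restore(int en)
    {
            soft_enabled = en;
            if (en && perf_counter_pending)
                    perf_counter_do_pending();
    }

    int main(void)
    {
            soft_enabled = 0;               /* enter a soft-disabled section */
            perf_monitor_exception();       /* exception slips in; work deferred */
            my_irq_restore(1);              /* the work happens here instead */
            return 0;
    }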
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/include/asm/hw_irq.h	31
-rw-r--r--	arch/powerpc/include/asm/paca.h		1
-rw-r--r--	arch/powerpc/kernel/asm-offsets.c	1
-rw-r--r--	arch/powerpc/kernel/entry_64.S		9
-rw-r--r--	arch/powerpc/kernel/irq.c		10
5 files changed, 52 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index f75a5fc64d2e..e10f151c3db6 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -131,5 +131,36 @@ static inline int irqs_disabled_flags(unsigned long flags)
  */
 struct hw_interrupt_type;
 
+#ifdef CONFIG_PERF_COUNTERS
+static inline unsigned long get_perf_counter_pending(void)
+{
+	unsigned long x;
+
+	asm volatile("lbz %0,%1(13)"
+		: "=r" (x)
+		: "i" (offsetof(struct paca_struct, perf_counter_pending)));
+	return x;
+}
+
+static inline void set_perf_counter_pending(int x)
+{
+	asm volatile("stb %0,%1(13)" : :
+		"r" (x),
+		"i" (offsetof(struct paca_struct, perf_counter_pending)));
+}
+
+extern void perf_counter_do_pending(void);
+
+#else
+
+static inline unsigned long get_perf_counter_pending(void)
+{
+	return 0;
+}
+
+static inline void set_perf_counter_pending(int x) {}
+static inline void perf_counter_do_pending(void) {}
+#endif /* CONFIG_PERF_COUNTERS */
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_HW_IRQ_H */
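Both accessors address the paca through register 13, which 64-bit powerpc permanently dedicates to the per-CPU paca pointer; the "i" (immediate) constraint folds the constant field offset into a single lbz/stb, so the helpers stay safe to call from NMI-like context. A hedged sketch of how the pair is meant to be used; the call sites shown are illustrative, the real ones being the performance monitor interrupt and the interrupt-enable paths changed below:

    /* Illustrative call sites, not part of this patch. */

    /* In the performance monitor exception (possibly soft-disabled): */
    set_perf_counter_pending(1);

    /* Later, on any path that re-enables interrupts: */
    if (get_perf_counter_pending())
            perf_counter_do_pending();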
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 082b3aedf145..6ef055723019 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -99,6 +99,7 @@ struct paca_struct {
 	u8 soft_enabled;		/* irq soft-enable flag */
 	u8 hard_enabled;		/* set if irqs are enabled in MSR */
 	u8 io_sync;			/* writel() needs spin_unlock sync */
+	u8 perf_counter_pending;	/* PM interrupt while soft-disabled */
 
 	/* Stuff for accurate time accounting */
 	u64 user_time;			/* accumulated usermode TB ticks */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 661d07d2146b..cea462900119 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -127,6 +127,7 @@ int main(void)
 	DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
 	DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
 	DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
+	DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_counter_pending));
 	DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
 	DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
 	DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
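asm-offsets.c is never linked into the kernel: kbuild compiles it and turns each DEFINE() into an assembler-visible constant in a generated asm-offsets.h. Roughly, and with a made-up value since the real one depends on the paca_struct layout, the new entry would come out as:

    /* Sketch of the generated asm-offsets.h line; the value shown is
     * hypothetical, being whatever offsetof(struct paca_struct,
     * perf_counter_pending) evaluates to. */
    #define PACAPERFPEND 10 /* offsetof(struct paca_struct, perf_counter_pending) */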
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 383ed6eb0085..f30b4e553c53 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -526,6 +526,15 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
 2:
 	TRACE_AND_RESTORE_IRQ(r5);
 
+#ifdef CONFIG_PERF_COUNTERS
+	/* check paca->perf_counter_pending if we're enabling ints */
+	lbz	r3,PACAPERFPEND(r13)
+	and.	r3,r3,r5
+	beq	27f
+	bl	.perf_counter_do_pending
+27:
+#endif /* CONFIG_PERF_COUNTERS */
+
 	/* extract EE bit and use it to restore paca->hard_enabled */
 	ld	r3,_MSR(r1)
 	rldicl	r4,r3,49,63		/* r4 = (r3 >> 15) & 1 */
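Read as C, the added assembly is guarded twice: r3 holds the pending byte and r5 the soft-enable value being restored, so the and./beq pair skips the call unless work is pending and interrupts are actually being turned back on. A sketch, not kernel code:

    /* C equivalent of the lbz/and./beq/bl sequence above;
     * "enabling" models the soft-enable value in r5. */
    if (get_paca()->perf_counter_pending && enabling)
            perf_counter_do_pending();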
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index ac222d0ab12e..4efb886ea439 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -104,6 +104,13 @@ static inline notrace void set_soft_enabled(unsigned long enable)
 	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
 }
 
+#ifdef CONFIG_PERF_COUNTERS
+notrace void __weak perf_counter_do_pending(void)
+{
+	set_perf_counter_pending(0);
+}
+#endif
+
 notrace void raw_local_irq_restore(unsigned long en)
 {
 	/*
@@ -135,6 +142,9 @@ notrace void raw_local_irq_restore(unsigned long en)
 		iseries_handle_interrupts();
 	}
 
+	if (get_perf_counter_pending())
+		perf_counter_do_pending();
+
 	/*
 	 * if (get_paca()->hard_enabled) return;
 	 * But again we need to take care that gcc gets hard_enabled directly
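The perf_counter_do_pending() stub above is deliberately __weak: irq.c can be merged and built before the real powerpc perf counter code exists, and a later strong definition silently replaces it at link time. A minimal sketch of such an override; the body is illustrative, the real handler processing the deferred wakeups:

    /* Hypothetical strong definition elsewhere in the tree; the linker
     * prefers it over the __weak stub in irq.c. */
    void perf_counter_do_pending(void)
    {
            set_perf_counter_pending(0);
            /* ... process the deferred counter wakeups (illustrative) ... */
    }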