aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/kernel/irq.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2012-03-21 21:55:10 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-03-21 21:55:10 -0400
commit5375871d432ae9fc581014ac117b96aaee3cd0c7 (patch)
treebe98e8255b0f927fb920fb532a598b93fa140dbe /arch/powerpc/kernel/irq.c
parentb57cb7231b2ce52d3dda14a7b417ae125fb2eb97 (diff)
parentdfbc2d75c1bd47c3186fa91f1655ea2f3825b0ec (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc merge from Benjamin Herrenschmidt: "Here's the powerpc batch for this merge window. It is going to be a bit more nasty than usual as in touching things outside of arch/powerpc mostly due to the big iSeriesectomy :-) We finally got rid of the bugger (legacy iSeries support) which was a PITA to maintain and that nobody really used anymore. Here are some of the highlights: - Legacy iSeries is gone. Thanks Stephen ! There's still some bits and pieces remaining if you do a grep -ir series arch/powerpc but they are harmless and will be removed in the next few weeks hopefully. - The 'fadump' functionality (Firmware Assisted Dump) replaces the previous (equivalent) "pHyp assisted dump"... it's a rewrite of a mechanism to get the hypervisor to do crash dumps on pSeries, the new implementation hopefully being much more reliable. Thanks Mahesh Salgaonkar. - The "EEH" code (pSeries PCI error handling & recovery) got a big spring cleaning, motivated by the need to be able to implement a new backend for it on top of some new different type of firmware. The work isn't complete yet, but a good chunk of the cleanups is there. Note that this adds a field to struct device_node which is not very nice and which Grant objects to. I will have a patch soon that moves that to a powerpc private data structure (hopefully before rc1) and we'll improve things further later on (hopefully getting rid of the need for that pointer completely). Thanks Gavin Shan. - I dug into our exception & interrupt handling code to improve the way we do lazy interrupt handling (and make it work properly with "edge" triggered interrupt sources), and while at it found & fixed a wagon of issues in those areas, including adding support for page fault retry & fatal signals on page faults. - Your usual random batch of small fixes & updates, including a bunch of new embedded boards, both Freescale and APM based ones, etc..." 
I fixed up some conflicts with the generalized irq-domain changes from Grant Likely, hopefully correctly. * 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (141 commits) powerpc/ps3: Do not adjust the wrapper load address powerpc: Remove the rest of the legacy iSeries include files powerpc: Remove the remaining CONFIG_PPC_ISERIES pieces init: Remove CONFIG_PPC_ISERIES powerpc: Remove FW_FEATURE ISERIES from arch code tty/hvc_vio: FW_FEATURE_ISERIES is no longer selectable powerpc/spufs: Fix double unlocks powerpc/5200: convert mpc5200 to use of_platform_populate() powerpc/mpc5200: add options to mpc5200_defconfig powerpc/mpc52xx: add a4m072 board support powerpc/mpc5200: update mpc5200_defconfig to fit for charon board Documentation/powerpc/mpc52xx.txt: Checkpatch cleanup powerpc/44x: Add additional device support for APM821xx SoC and Bluestone board powerpc/44x: Add support PCI-E for APM821xx SoC and Bluestone board MAINTAINERS: Update PowerPC 4xx tree powerpc/44x: The bug fixed support for APM821xx SoC and Bluestone board powerpc: document the FSL MPIC message register binding powerpc: add support for MPIC message register API powerpc/fsl: Added aliased MSIIR register address to MSI node in dts powerpc/85xx: mpc8548cds - add 36-bit dts ...
Diffstat (limited to 'arch/powerpc/kernel/irq.c')
-rw-r--r--arch/powerpc/kernel/irq.c212
1 files changed, 141 insertions, 71 deletions
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index bdfb3eee3e6f..a3d128e94cff 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -93,20 +93,16 @@ extern int tau_interrupts(int);
93 93
94#ifdef CONFIG_PPC64 94#ifdef CONFIG_PPC64
95 95
96#ifndef CONFIG_SPARSE_IRQ
97EXPORT_SYMBOL(irq_desc);
98#endif
99
100int distribute_irqs = 1; 96int distribute_irqs = 1;
101 97
102static inline notrace unsigned long get_hard_enabled(void) 98static inline notrace unsigned long get_irq_happened(void)
103{ 99{
104 unsigned long enabled; 100 unsigned long happened;
105 101
106 __asm__ __volatile__("lbz %0,%1(13)" 102 __asm__ __volatile__("lbz %0,%1(13)"
107 : "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled))); 103 : "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));
108 104
109 return enabled; 105 return happened;
110} 106}
111 107
112static inline notrace void set_soft_enabled(unsigned long enable) 108static inline notrace void set_soft_enabled(unsigned long enable)
@@ -115,88 +111,162 @@ static inline notrace void set_soft_enabled(unsigned long enable)
115 : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))); 111 : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
116} 112}
117 113
118static inline notrace void decrementer_check_overflow(void) 114static inline notrace int decrementer_check_overflow(void)
119{ 115{
120 u64 now = get_tb_or_rtc(); 116 u64 now = get_tb_or_rtc();
121 u64 *next_tb; 117 u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
122 118
123 preempt_disable();
124 next_tb = &__get_cpu_var(decrementers_next_tb);
125
126 if (now >= *next_tb) 119 if (now >= *next_tb)
127 set_dec(1); 120 set_dec(1);
128 preempt_enable(); 121 return now >= *next_tb;
129} 122}
130 123
131notrace void arch_local_irq_restore(unsigned long en) 124/* This is called whenever we are re-enabling interrupts
125 * and returns either 0 (nothing to do) or 500/900 if there's
126 * either an EE or a DEC to generate.
127 *
128 * This is called in two contexts: From arch_local_irq_restore()
129 * before soft-enabling interrupts, and from the exception exit
130 * path when returning from an interrupt from a soft-disabled to
131 * a soft enabled context. In both case we have interrupts hard
132 * disabled.
133 *
134 * We take care of only clearing the bits we handled in the
135 * PACA irq_happened field since we can only re-emit one at a
136 * time and we don't want to "lose" one.
137 */
138notrace unsigned int __check_irq_replay(void)
132{ 139{
133 /* 140 /*
134 * get_paca()->soft_enabled = en; 141 * We use local_paca rather than get_paca() to avoid all
135 * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1? 142 * the debug_smp_processor_id() business in this low level
136 * That was allowed before, and in such a case we do need to take care 143 * function
137 * that gcc will set soft_enabled directly via r13, not choose to use
138 * an intermediate register, lest we're preempted to a different cpu.
139 */ 144 */
140 set_soft_enabled(en); 145 unsigned char happened = local_paca->irq_happened;
141 if (!en)
142 return;
143 146
144#ifdef CONFIG_PPC_STD_MMU_64 147 /* Clear bit 0 which we wouldn't clear otherwise */
145 if (firmware_has_feature(FW_FEATURE_ISERIES)) { 148 local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
146 /* 149
147 * Do we need to disable preemption here? Not really: in the 150 /*
148 * unlikely event that we're preempted to a different cpu in 151 * Force the delivery of pending soft-disabled interrupts on PS3.
149 * between getting r13, loading its lppaca_ptr, and loading 152 * Any HV call will have this side effect.
150 * its any_int, we might call iseries_handle_interrupts without 153 */
151 * an interrupt pending on the new cpu, but that's no disaster, 154 if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
152 * is it? And the business of preempting us off the old cpu 155 u64 tmp, tmp2;
153 * would itself involve a local_irq_restore which handles the 156 lv1_get_version_info(&tmp, &tmp2);
154 * interrupt to that cpu.
155 *
156 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
157 * to avoid any preemption checking added into get_paca().
158 */
159 if (local_paca->lppaca_ptr->int_dword.any_int)
160 iseries_handle_interrupts();
161 } 157 }
162#endif /* CONFIG_PPC_STD_MMU_64 */
163 158
164 /* 159 /*
165 * if (get_paca()->hard_enabled) return; 160 * We may have missed a decrementer interrupt. We check the
166 * But again we need to take care that gcc gets hard_enabled directly 161 * decrementer itself rather than the paca irq_happened field
167 * via r13, not choose to use an intermediate register, lest we're 162 * in case we also had a rollover while hard disabled
168 * preempted to a different cpu in between the two instructions. 163 */
164 local_paca->irq_happened &= ~PACA_IRQ_DEC;
165 if (decrementer_check_overflow())
166 return 0x900;
167
168 /* Finally check if an external interrupt happened */
169 local_paca->irq_happened &= ~PACA_IRQ_EE;
170 if (happened & PACA_IRQ_EE)
171 return 0x500;
172
173#ifdef CONFIG_PPC_BOOK3E
174 /* Finally check if an EPR external interrupt happened
175 * this bit is typically set if we need to handle another
176 * "edge" interrupt from within the MPIC "EPR" handler
169 */ 177 */
170 if (get_hard_enabled()) 178 local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
179 if (happened & PACA_IRQ_EE_EDGE)
180 return 0x500;
181
182 local_paca->irq_happened &= ~PACA_IRQ_DBELL;
183 if (happened & PACA_IRQ_DBELL)
184 return 0x280;
185#endif /* CONFIG_PPC_BOOK3E */
186
187 /* There should be nothing left ! */
188 BUG_ON(local_paca->irq_happened != 0);
189
190 return 0;
191}
192
193notrace void arch_local_irq_restore(unsigned long en)
194{
195 unsigned char irq_happened;
196 unsigned int replay;
197
198 /* Write the new soft-enabled value */
199 set_soft_enabled(en);
200 if (!en)
201 return;
202 /*
203 * From this point onward, we can take interrupts, preempt,
204 * etc... unless we got hard-disabled. We check if an event
205 * happened. If none happened, we know we can just return.
206 *
207 * We may have preempted before the check below, in which case
208 * we are checking the "new" CPU instead of the old one. This
209 * is only a problem if an event happened on the "old" CPU.
210 *
211 * External interrupt events on non-iseries will have caused
212 * interrupts to be hard-disabled, so there is no problem, we
213 * cannot have preempted.
214 */
215 irq_happened = get_irq_happened();
216 if (!irq_happened)
171 return; 217 return;
172 218
173 /* 219 /*
174 * Need to hard-enable interrupts here. Since currently disabled, 220 * We need to hard disable to get a trusted value from
175 * no need to take further asm precautions against preemption; but 221 * __check_irq_replay(). We also need to soft-disable
176 * use local_paca instead of get_paca() to avoid preemption checking. 222 * again to avoid warnings in there due to the use of
223 * per-cpu variables.
224 *
225 * We know that if the value in irq_happened is exactly 0x01
226 * then we are already hard disabled (there are other less
227 * common cases that we'll ignore for now), so we skip the
228 * (expensive) mtmsrd.
177 */ 229 */
178 local_paca->hard_enabled = en; 230 if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
231 __hard_irq_disable();
232 set_soft_enabled(0);
179 233
180 /* 234 /*
181 * Trigger the decrementer if we have a pending event. Some processors 235 * Check if anything needs to be re-emitted. We haven't
182 * only trigger on edge transitions of the sign bit. We might also 236 * soft-enabled yet to avoid warnings in decrementer_check_overflow
183 * have disabled interrupts long enough that the decrementer wrapped 237 * accessing per-cpu variables
184 * to positive.
185 */ 238 */
186 decrementer_check_overflow(); 239 replay = __check_irq_replay();
240
241 /* We can soft-enable now */
242 set_soft_enabled(1);
187 243
188 /* 244 /*
189 * Force the delivery of pending soft-disabled interrupts on PS3. 245 * And replay if we have to. This will return with interrupts
190 * Any HV call will have this side effect. 246 * hard-enabled.
191 */ 247 */
192 if (firmware_has_feature(FW_FEATURE_PS3_LV1)) { 248 if (replay) {
193 u64 tmp, tmp2; 249 __replay_interrupt(replay);
194 lv1_get_version_info(&tmp, &tmp2); 250 return;
195 } 251 }
196 252
253 /* Finally, let's ensure we are hard enabled */
197 __hard_irq_enable(); 254 __hard_irq_enable();
198} 255}
199EXPORT_SYMBOL(arch_local_irq_restore); 256EXPORT_SYMBOL(arch_local_irq_restore);
257
258/*
259 * This is specifically called by assembly code to re-enable interrupts
260 * if they are currently disabled. This is typically called before
261 * schedule() or do_signal() when returning to userspace. We do it
262 * in C to avoid the burden of dealing with lockdep etc...
263 */
264void restore_interrupts(void)
265{
266 if (irqs_disabled())
267 local_irq_enable();
268}
269
200#endif /* CONFIG_PPC64 */ 270#endif /* CONFIG_PPC64 */
201 271
202int arch_show_interrupts(struct seq_file *p, int prec) 272int arch_show_interrupts(struct seq_file *p, int prec)
@@ -364,8 +434,17 @@ void do_IRQ(struct pt_regs *regs)
364 434
365 check_stack_overflow(); 435 check_stack_overflow();
366 436
437 /*
438 * Query the platform PIC for the interrupt & ack it.
439 *
440 * This will typically lower the interrupt line to the CPU
441 */
367 irq = ppc_md.get_irq(); 442 irq = ppc_md.get_irq();
368 443
444 /* We can hard enable interrupts now */
445 may_hard_irq_enable();
446
447 /* And finally process it */
369 if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) 448 if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
370 handle_one_irq(irq); 449 handle_one_irq(irq);
371 else if (irq != NO_IRQ_IGNORE) 450 else if (irq != NO_IRQ_IGNORE)
@@ -374,15 +453,6 @@ void do_IRQ(struct pt_regs *regs)
374 irq_exit(); 453 irq_exit();
375 set_irq_regs(old_regs); 454 set_irq_regs(old_regs);
376 455
377#ifdef CONFIG_PPC_ISERIES
378 if (firmware_has_feature(FW_FEATURE_ISERIES) &&
379 get_lppaca()->int_dword.fields.decr_int) {
380 get_lppaca()->int_dword.fields.decr_int = 0;
381 /* Signal a fake decrementer interrupt */
382 timer_interrupt(regs);
383 }
384#endif
385
386 trace_irq_exit(regs); 456 trace_irq_exit(regs);
387} 457}
388 458