diff options
Diffstat (limited to 'arch/powerpc/sysdev/uic.c')
-rw-r--r-- | arch/powerpc/sysdev/uic.c | 132 |
1 files changed, 42 insertions, 90 deletions
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c index 847a5496b869..625b275c3795 100644 --- a/arch/powerpc/sysdev/uic.c +++ b/arch/powerpc/sysdev/uic.c | |||
@@ -53,21 +53,23 @@ struct uic { | |||
53 | 53 | ||
54 | /* The remapper for this UIC */ | 54 | /* The remapper for this UIC */ |
55 | struct irq_host *irqhost; | 55 | struct irq_host *irqhost; |
56 | |||
57 | /* For secondary UICs, the cascade interrupt's irqaction */ | ||
58 | struct irqaction cascade; | ||
59 | }; | 56 | }; |
60 | 57 | ||
61 | static void uic_unmask_irq(unsigned int virq) | 58 | static void uic_unmask_irq(unsigned int virq) |
62 | { | 59 | { |
60 | struct irq_desc *desc = get_irq_desc(virq); | ||
63 | struct uic *uic = get_irq_chip_data(virq); | 61 | struct uic *uic = get_irq_chip_data(virq); |
64 | unsigned int src = uic_irq_to_hw(virq); | 62 | unsigned int src = uic_irq_to_hw(virq); |
65 | unsigned long flags; | 63 | unsigned long flags; |
66 | u32 er; | 64 | u32 er, sr; |
67 | 65 | ||
66 | sr = 1 << (31-src); | ||
68 | spin_lock_irqsave(&uic->lock, flags); | 67 | spin_lock_irqsave(&uic->lock, flags); |
68 | /* ack level-triggered interrupts here */ | ||
69 | if (desc->status & IRQ_LEVEL) | ||
70 | mtdcr(uic->dcrbase + UIC_SR, sr); | ||
69 | er = mfdcr(uic->dcrbase + UIC_ER); | 71 | er = mfdcr(uic->dcrbase + UIC_ER); |
70 | er |= 1 << (31 - src); | 72 | er |= sr; |
71 | mtdcr(uic->dcrbase + UIC_ER, er); | 73 | mtdcr(uic->dcrbase + UIC_ER, er); |
72 | spin_unlock_irqrestore(&uic->lock, flags); | 74 | spin_unlock_irqrestore(&uic->lock, flags); |
73 | } | 75 | } |
@@ -99,6 +101,7 @@ static void uic_ack_irq(unsigned int virq) | |||
99 | 101 | ||
100 | static void uic_mask_ack_irq(unsigned int virq) | 102 | static void uic_mask_ack_irq(unsigned int virq) |
101 | { | 103 | { |
104 | struct irq_desc *desc = get_irq_desc(virq); | ||
102 | struct uic *uic = get_irq_chip_data(virq); | 105 | struct uic *uic = get_irq_chip_data(virq); |
103 | unsigned int src = uic_irq_to_hw(virq); | 106 | unsigned int src = uic_irq_to_hw(virq); |
104 | unsigned long flags; | 107 | unsigned long flags; |
@@ -109,7 +112,16 @@ static void uic_mask_ack_irq(unsigned int virq) | |||
109 | er = mfdcr(uic->dcrbase + UIC_ER); | 112 | er = mfdcr(uic->dcrbase + UIC_ER); |
110 | er &= ~sr; | 113 | er &= ~sr; |
111 | mtdcr(uic->dcrbase + UIC_ER, er); | 114 | mtdcr(uic->dcrbase + UIC_ER, er); |
112 | mtdcr(uic->dcrbase + UIC_SR, sr); | 115 | /* On the UIC, acking (i.e. clearing the SR bit) |
116 | * a level irq will have no effect if the interrupt | ||
117 | * is still asserted by the device, even if | ||
118 | * the interrupt is already masked. Therefore | ||
119 | * we only ack the edge interrupts here, while | ||
120 | * level interrupts are ack'ed after the actual | ||
121 | * isr call in the uic_unmask_irq() | ||
122 | */ | ||
123 | if (!(desc->status & IRQ_LEVEL)) | ||
124 | mtdcr(uic->dcrbase + UIC_SR, sr); | ||
113 | spin_unlock_irqrestore(&uic->lock, flags); | 125 | spin_unlock_irqrestore(&uic->lock, flags); |
114 | } | 126 | } |
115 | 127 | ||
@@ -173,64 +185,6 @@ static struct irq_chip uic_irq_chip = { | |||
173 | .set_type = uic_set_irq_type, | 185 | .set_type = uic_set_irq_type, |
174 | }; | 186 | }; |
175 | 187 | ||
176 | /** | ||
177 | * handle_uic_irq - irq flow handler for UIC | ||
178 | * @irq: the interrupt number | ||
179 | * @desc: the interrupt description structure for this irq | ||
180 | * | ||
181 | * This is a modified version of the generic handle_level_irq() suitable | ||
182 | * for the UIC. On the UIC, acking (i.e. clearing the SR bit) a level | ||
183 | * irq will have no effect if the interrupt is still asserted by the | ||
184 | * device, even if the interrupt is already masked. Therefore, unlike | ||
185 | * the standard handle_level_irq(), we must ack the interrupt *after* | ||
186 | * invoking the ISR (which should have de-asserted the interrupt in | ||
187 | * the external source). For edge interrupts we ack at the beginning | ||
188 | * instead of the end, to keep the window in which we can miss an | ||
189 | * interrupt as small as possible. | ||
190 | */ | ||
191 | void fastcall handle_uic_irq(unsigned int irq, struct irq_desc *desc) | ||
192 | { | ||
193 | unsigned int cpu = smp_processor_id(); | ||
194 | struct irqaction *action; | ||
195 | irqreturn_t action_ret; | ||
196 | |||
197 | spin_lock(&desc->lock); | ||
198 | if (desc->status & IRQ_LEVEL) | ||
199 | desc->chip->mask(irq); | ||
200 | else | ||
201 | desc->chip->mask_ack(irq); | ||
202 | |||
203 | if (unlikely(desc->status & IRQ_INPROGRESS)) | ||
204 | goto out_unlock; | ||
205 | desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); | ||
206 | kstat_cpu(cpu).irqs[irq]++; | ||
207 | |||
208 | /* | ||
209 | * If its disabled or no action available | ||
210 | * keep it masked and get out of here | ||
211 | */ | ||
212 | action = desc->action; | ||
213 | if (unlikely(!action || (desc->status & IRQ_DISABLED))) { | ||
214 | desc->status |= IRQ_PENDING; | ||
215 | goto out_unlock; | ||
216 | } | ||
217 | |||
218 | desc->status |= IRQ_INPROGRESS; | ||
219 | desc->status &= ~IRQ_PENDING; | ||
220 | spin_unlock(&desc->lock); | ||
221 | |||
222 | action_ret = handle_IRQ_event(irq, action); | ||
223 | |||
224 | spin_lock(&desc->lock); | ||
225 | desc->status &= ~IRQ_INPROGRESS; | ||
226 | if (desc->status & IRQ_LEVEL) | ||
227 | desc->chip->ack(irq); | ||
228 | if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) | ||
229 | desc->chip->unmask(irq); | ||
230 | out_unlock: | ||
231 | spin_unlock(&desc->lock); | ||
232 | } | ||
233 | |||
234 | static int uic_host_map(struct irq_host *h, unsigned int virq, | 188 | static int uic_host_map(struct irq_host *h, unsigned int virq, |
235 | irq_hw_number_t hw) | 189 | irq_hw_number_t hw) |
236 | { | 190 | { |
@@ -239,7 +193,7 @@ static int uic_host_map(struct irq_host *h, unsigned int virq, | |||
239 | set_irq_chip_data(virq, uic); | 193 | set_irq_chip_data(virq, uic); |
240 | /* Despite the name, handle_level_irq() works for both level | 194 | /* Despite the name, handle_level_irq() works for both level |
241 | * and edge irqs on UIC. FIXME: check this is correct */ | 195 | * and edge irqs on UIC. FIXME: check this is correct */ |
242 | set_irq_chip_and_handler(virq, &uic_irq_chip, handle_uic_irq); | 196 | set_irq_chip_and_handler(virq, &uic_irq_chip, handle_level_irq); |
243 | 197 | ||
244 | /* Set default irq type */ | 198 | /* Set default irq type */ |
245 | set_irq_type(virq, IRQ_TYPE_NONE); | 199 | set_irq_type(virq, IRQ_TYPE_NONE); |
@@ -264,23 +218,36 @@ static struct irq_host_ops uic_host_ops = { | |||
264 | .xlate = uic_host_xlate, | 218 | .xlate = uic_host_xlate, |
265 | }; | 219 | }; |
266 | 220 | ||
267 | irqreturn_t uic_cascade(int virq, void *data) | 221 | void uic_irq_cascade(unsigned int virq, struct irq_desc *desc) |
268 | { | 222 | { |
269 | struct uic *uic = data; | 223 | struct uic *uic = get_irq_data(virq); |
270 | u32 msr; | 224 | u32 msr; |
271 | int src; | 225 | int src; |
272 | int subvirq; | 226 | int subvirq; |
273 | 227 | ||
228 | spin_lock(&desc->lock); | ||
229 | if (desc->status & IRQ_LEVEL) | ||
230 | desc->chip->mask(virq); | ||
231 | else | ||
232 | desc->chip->mask_ack(virq); | ||
233 | spin_unlock(&desc->lock); | ||
234 | |||
274 | msr = mfdcr(uic->dcrbase + UIC_MSR); | 235 | msr = mfdcr(uic->dcrbase + UIC_MSR); |
275 | if (!msr) /* spurious interrupt */ | 236 | if (!msr) /* spurious interrupt */ |
276 | return IRQ_HANDLED; | 237 | goto uic_irq_ret; |
277 | 238 | ||
278 | src = 32 - ffs(msr); | 239 | src = 32 - ffs(msr); |
279 | 240 | ||
280 | subvirq = irq_linear_revmap(uic->irqhost, src); | 241 | subvirq = irq_linear_revmap(uic->irqhost, src); |
281 | generic_handle_irq(subvirq); | 242 | generic_handle_irq(subvirq); |
282 | 243 | ||
283 | return IRQ_HANDLED; | 244 | uic_irq_ret: |
245 | spin_lock(&desc->lock); | ||
246 | if (desc->status & IRQ_LEVEL) | ||
247 | desc->chip->ack(virq); | ||
248 | if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) | ||
249 | desc->chip->unmask(virq); | ||
250 | spin_unlock(&desc->lock); | ||
284 | } | 251 | } |
285 | 252 | ||
286 | static struct uic * __init uic_init_one(struct device_node *node) | 253 | static struct uic * __init uic_init_one(struct device_node *node) |
@@ -342,33 +309,27 @@ void __init uic_init_tree(void) | |||
342 | const u32 *interrupts; | 309 | const u32 *interrupts; |
343 | 310 | ||
344 | /* First locate and initialize the top-level UIC */ | 311 | /* First locate and initialize the top-level UIC */ |
345 | 312 | for_each_compatible_node(np, NULL, "ibm,uic") { | |
346 | np = of_find_compatible_node(NULL, NULL, "ibm,uic"); | ||
347 | while (np) { | ||
348 | interrupts = of_get_property(np, "interrupts", NULL); | 313 | interrupts = of_get_property(np, "interrupts", NULL); |
349 | if (! interrupts) | 314 | if (!interrupts) |
350 | break; | 315 | break; |
351 | |||
352 | np = of_find_compatible_node(np, NULL, "ibm,uic"); | ||
353 | } | 316 | } |
354 | 317 | ||
355 | BUG_ON(!np); /* uic_init_tree() assumes there's a UIC as the | 318 | BUG_ON(!np); /* uic_init_tree() assumes there's a UIC as the |
356 | * top-level interrupt controller */ | 319 | * top-level interrupt controller */ |
357 | primary_uic = uic_init_one(np); | 320 | primary_uic = uic_init_one(np); |
358 | if (! primary_uic) | 321 | if (!primary_uic) |
359 | panic("Unable to initialize primary UIC %s\n", np->full_name); | 322 | panic("Unable to initialize primary UIC %s\n", np->full_name); |
360 | 323 | ||
361 | irq_set_default_host(primary_uic->irqhost); | 324 | irq_set_default_host(primary_uic->irqhost); |
362 | of_node_put(np); | 325 | of_node_put(np); |
363 | 326 | ||
364 | /* Then scan again for cascaded UICs */ | 327 | for_each_compatible_node(np, NULL, "ibm,uic") { |
365 | np = of_find_compatible_node(NULL, NULL, "ibm,uic"); | 328 | for_each_compatible_node(np, NULL, "ibm,uic") { |
366 | while (np) { | ||
367 | interrupts = of_get_property(np, "interrupts", NULL); | 329 | interrupts = of_get_property(np, "interrupts", NULL); |
368 | if (interrupts) { | 330 | if (interrupts) { |
369 | /* Secondary UIC */ | 331 | /* Secondary UIC */ |
370 | int cascade_virq; | 332 | int cascade_virq; |
371 | int ret; | ||
372 | 333 | ||
373 | uic = uic_init_one(np); | 334 | uic = uic_init_one(np); |
374 | if (! uic) | 335 | if (! uic) |
@@ -377,20 +338,11 @@ void __init uic_init_tree(void) | |||
377 | 338 | ||
378 | cascade_virq = irq_of_parse_and_map(np, 0); | 339 | cascade_virq = irq_of_parse_and_map(np, 0); |
379 | 340 | ||
380 | uic->cascade.handler = uic_cascade; | 341 | set_irq_data(cascade_virq, uic); |
381 | uic->cascade.name = "UIC cascade"; | 342 | set_irq_chained_handler(cascade_virq, uic_irq_cascade); |
382 | uic->cascade.dev_id = uic; | ||
383 | |||
384 | ret = setup_irq(cascade_virq, &uic->cascade); | ||
385 | if (ret) | ||
386 | printk(KERN_ERR "Failed to setup_irq(%d) for " | ||
387 | "UIC%d cascade\n", cascade_virq, | ||
388 | uic->index); | ||
389 | 343 | ||
390 | /* FIXME: setup critical cascade?? */ | 344 | /* FIXME: setup critical cascade?? */ |
391 | } | 345 | } |
392 | |||
393 | np = of_find_compatible_node(np, NULL, "ibm,uic"); | ||
394 | } | 346 | } |
395 | } | 347 | } |
396 | 348 | ||